Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_av.c       |   1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c      | 531
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h      |  48
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c       | 101
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h      |  12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_doorbell.h |   1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c       |  58
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c     |  32
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mcg.c      |  63
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c  |  10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c       | 367
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c |   4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h |  14
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c       | 139
14 files changed, 697 insertions(+), 684 deletions(-)
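The heart of this patch is the replacement of per-command pci_alloc_consistent()/pci_map_single() buffers with fixed-size, DMA-coherent command mailboxes carved from a PCI pool (dev->cmd.pool, created in the new mthca_cmd_init()), plus the corresponding change of every firmware command wrapper to take a struct mthca_mailbox instead of a raw virtual address. The sketch below condenses that pattern from the hunks that follow; example_sw2hw_cq() is an illustrative caller invented for this summary, not part of the patch, while struct mthca_mailbox, mthca_alloc_mailbox(), mthca_free_mailbox() and mthca_SW2HW_CQ() are introduced or re-prototyped in the diff itself.

#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * Illustrative caller (not from the patch) showing the new mailbox
 * pattern, error handling abbreviated.  The command payload is built
 * directly in the coherent mailbox buffer; the command wrapper passes
 * mailbox->dma to the HCR, so no per-command pci_map_single()/
 * pci_unmap_single() round trip is needed any more.
 */
static int example_sw2hw_cq(struct mthca_dev *dev, struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	u8 status;
	int err;

	/* One MTHCA_MAILBOX_SIZE (4096-byte) buffer from dev->cmd.pool. */
	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);
	/* ... fill in cq_context fields as mthca_init_cq() does ... */

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
	if (!err && status)
		err = -EINVAL;

	mthca_free_mailbox(dev, mailbox);
	return err;
}

Because every mailbox is a fixed MTHCA_MAILBOX_SIZE, commands that batch entries can size themselves against the mailbox rather than PAGE_SIZE; the MAP_ICM and WRITE_MTT hunks below flush after MTHCA_MAILBOX_SIZE / 16 and MTHCA_MAILBOX_SIZE / 8 - 2 entries respectively.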
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index 085baf3..d58dcbe 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index cd9ed95..1557a52 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -431,6 +431,36 @@ static int mthca_cmd_imm(struct mthca_dev *dev, timeout, status); } +int mthca_cmd_init(struct mthca_dev *dev) +{ + sema_init(&dev->cmd.hcr_sem, 1); + sema_init(&dev->cmd.poll_sem, 1); + dev->cmd.use_events = 0; + + dev->hcr = ioremap(pci_resource_start(dev->pdev, 0) + MTHCA_HCR_BASE, + MTHCA_HCR_SIZE); + if (!dev->hcr) { + mthca_err(dev, "Couldn't map command register."); + return -ENOMEM; + } + + dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev, + MTHCA_MAILBOX_SIZE, + MTHCA_MAILBOX_SIZE, 0); + if (!dev->cmd.pool) { + iounmap(dev->hcr); + return -ENOMEM; + } + + return 0; +} + +void mthca_cmd_cleanup(struct mthca_dev *dev) +{ + pci_pool_destroy(dev->cmd.pool); + iounmap(dev->hcr); +} + /* * Switch to using events to issue FW commands (should be called after * event queue to command events has been initialized). @@ -489,6 +519,33 @@ void mthca_cmd_use_polling(struct mthca_dev *dev) up(&dev->cmd.poll_sem); } +struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, + unsigned int gfp_mask) +{ + struct mthca_mailbox *mailbox; + + mailbox = kmalloc(sizeof *mailbox, gfp_mask); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma); + if (!mailbox->buf) { + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + + return mailbox; +} + +void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox) +{ + if (!mailbox) + return; + + pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} + int mthca_SYS_EN(struct mthca_dev *dev, u8 *status) { u64 out; @@ -513,20 +570,20 @@ int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status) static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, u64 virt, u8 *status) { - u32 *inbox; - dma_addr_t indma; + struct mthca_mailbox *mailbox; struct mthca_icm_iter iter; + __be64 *pages; int lg; int nent = 0; int i; int err = 0; int ts = 0, tc = 0; - inbox = pci_alloc_consistent(dev->pdev, PAGE_SIZE, &indma); - if (!inbox) - return -ENOMEM; - - memset(inbox, 0, PAGE_SIZE); + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE); + pages = mailbox->buf; for (mthca_icm_first(icm, &iter); !mthca_icm_last(&iter); @@ -546,19 +603,17 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, } for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) { if (virt != -1) { - *((__be64 *) (inbox + nent * 4)) = - cpu_to_be64(virt); + pages[nent * 2] = cpu_to_be64(virt); virt += 1 << lg; } - *((__be64 *) (inbox + nent * 4 + 2)) = - cpu_to_be64((mthca_icm_addr(&iter) + - (i << lg)) | (lg - 12)); + pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) + + (i << lg)) | (lg - 12)); ts += 1 << (lg - 10); ++tc; - if 
(nent == PAGE_SIZE / 16) { - err = mthca_cmd(dev, indma, nent, 0, op, + if (nent == MTHCA_MAILBOX_SIZE / 16) { + err = mthca_cmd(dev, mailbox->dma, nent, 0, op, CMD_TIME_CLASS_B, status); if (err || *status) goto out; @@ -568,7 +623,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, } if (nent) - err = mthca_cmd(dev, indma, nent, 0, op, + err = mthca_cmd(dev, mailbox->dma, nent, 0, op, CMD_TIME_CLASS_B, status); switch (op) { @@ -585,7 +640,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, } out: - pci_free_consistent(dev->pdev, PAGE_SIZE, inbox, indma); + mthca_free_mailbox(dev, mailbox); return err; } @@ -606,8 +661,8 @@ int mthca_RUN_FW(struct mthca_dev *dev, u8 *status) int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) { + struct mthca_mailbox *mailbox; u32 *outbox; - dma_addr_t outdma; int err = 0; u8 lg; @@ -625,12 +680,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) #define QUERY_FW_EQ_ARM_BASE_OFFSET 0x40 #define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48 - outbox = pci_alloc_consistent(dev->pdev, QUERY_FW_OUT_SIZE, &outdma); - if (!outbox) { - return -ENOMEM; - } + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; - err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_FW, + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW, CMD_TIME_CLASS_A, status); if (err) @@ -681,15 +736,15 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) } out: - pci_free_consistent(dev->pdev, QUERY_FW_OUT_SIZE, outbox, outdma); + mthca_free_mailbox(dev, mailbox); return err; } int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) { + struct mthca_mailbox *mailbox; u8 info; u32 *outbox; - dma_addr_t outdma; int err = 0; #define ENABLE_LAM_OUT_SIZE 0x100 @@ -700,11 +755,12 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) #define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4) #define ENABLE_LAM_INFO_ECC_MASK 0x3 - outbox = pci_alloc_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, &outdma); - if (!outbox) - return -ENOMEM; + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; - err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_ENABLE_LAM, + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM, CMD_TIME_CLASS_C, status); if (err) @@ -733,7 +789,7 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) (unsigned long long) dev->ddr_end); out: - pci_free_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, outbox, outdma); + mthca_free_mailbox(dev, mailbox); return err; } @@ -744,9 +800,9 @@ int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status) int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) { + struct mthca_mailbox *mailbox; u8 info; u32 *outbox; - dma_addr_t outdma; int err = 0; #define QUERY_DDR_OUT_SIZE 0x100 @@ -757,11 +813,12 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) #define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4) #define QUERY_DDR_INFO_ECC_MASK 0x3 - outbox = pci_alloc_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, &outdma); - if (!outbox) - return -ENOMEM; + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; - err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DDR, + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR, CMD_TIME_CLASS_A, status); if (err) @@ -787,15 +844,15 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) (unsigned long long) dev->ddr_end); out: - 
pci_free_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, outbox, outdma); + mthca_free_mailbox(dev, mailbox); return err; } int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, struct mthca_dev_lim *dev_lim, u8 *status) { + struct mthca_mailbox *mailbox; u32 *outbox; - dma_addr_t outdma; u8 field; u16 size; int err; @@ -860,11 +917,12 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, #define QUERY_DEV_LIM_LAMR_OFFSET 0x9f #define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0 - outbox = pci_alloc_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, &outdma); - if (!outbox) - return -ENOMEM; + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; - err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DEV_LIM, + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM, CMD_TIME_CLASS_A, status); if (err) @@ -1020,15 +1078,15 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, } out: - pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma); + mthca_free_mailbox(dev, mailbox); return err; } int mthca_QUERY_ADAPTER(struct mthca_dev *dev, struct mthca_adapter *adapter, u8 *status) { + struct mthca_mailbox *mailbox; u32 *outbox; - dma_addr_t outdma; int err; #define QUERY_ADAPTER_OUT_SIZE 0x100 @@ -1037,23 +1095,24 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev, #define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 - outbox = pci_alloc_consistent(dev->pdev, QUERY_ADAPTER_OUT_SIZE, &outdma); - if (!outbox) - return -ENOMEM; + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; - err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_ADAPTER, + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER, CMD_TIME_CLASS_A, status); if (err) goto out; - MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); - MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); + MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); + MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); - MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); + MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); out: - pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma); + mthca_free_mailbox(dev, mailbox); return err; } @@ -1061,8 +1120,8 @@ int mthca_INIT_HCA(struct mthca_dev *dev, struct mthca_init_hca_param *param, u8 *status) { + struct mthca_mailbox *mailbox; u32 *inbox; - dma_addr_t indma; int err; #define INIT_HCA_IN_SIZE 0x200 @@ -1102,9 +1161,10 @@ int mthca_INIT_HCA(struct mthca_dev *dev, #define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10) #define INIT_HCA_UAR_CTX_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x18) - inbox = pci_alloc_consistent(dev->pdev, INIT_HCA_IN_SIZE, &indma); - if (!inbox) - return -ENOMEM; + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; memset(inbox, 0, INIT_HCA_IN_SIZE); @@ -1167,10 +1227,9 @@ int mthca_INIT_HCA(struct mthca_dev *dev, MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET); } - err = mthca_cmd(dev, indma, 0, 0, CMD_INIT_HCA, - HZ, status); + err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status); - pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma); + mthca_free_mailbox(dev, mailbox); return err; } @@ -1178,8 
+1237,8 @@ int mthca_INIT_IB(struct mthca_dev *dev, struct mthca_init_ib_param *param, int port, u8 *status) { + struct mthca_mailbox *mailbox; u32 *inbox; - dma_addr_t indma; int err; u32 flags; @@ -1199,9 +1258,10 @@ int mthca_INIT_IB(struct mthca_dev *dev, #define INIT_IB_NODE_GUID_OFFSET 0x18 #define INIT_IB_SI_GUID_OFFSET 0x20 - inbox = pci_alloc_consistent(dev->pdev, INIT_IB_IN_SIZE, &indma); - if (!inbox) - return -ENOMEM; + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; memset(inbox, 0, INIT_IB_IN_SIZE); @@ -1221,10 +1281,10 @@ int mthca_INIT_IB(struct mthca_dev *dev, MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET); MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET); - err = mthca_cmd(dev, indma, port, 0, CMD_INIT_IB, + err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB, CMD_TIME_CLASS_A, status); - pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma); + mthca_free_mailbox(dev, mailbox); return err; } @@ -1241,8 +1301,8 @@ int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status) int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, int port, u8 *status) { + struct mthca_mailbox *mailbox; u32 *inbox; - dma_addr_t indma; int err; u32 flags = 0; @@ -1253,9 +1313,10 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, #define SET_IB_CAP_MASK_OFFSET 0x04 #define SET_IB_SI_GUID_OFFSET 0x08 - inbox = pci_alloc_consistent(dev->pdev, SET_IB_IN_SIZE, &indma); - if (!inbox) - return -ENOMEM; + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; memset(inbox, 0, SET_IB_IN_SIZE); @@ -1266,10 +1327,10 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET); MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET); - err = mthca_cmd(dev, indma, port, 0, CMD_SET_IB, + err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB, CMD_TIME_CLASS_B, status); - pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma); + mthca_free_mailbox(dev, mailbox); return err; } @@ -1280,20 +1341,22 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) { + struct mthca_mailbox *mailbox; u64 *inbox; - dma_addr_t indma; int err; - inbox = pci_alloc_consistent(dev->pdev, 16, &indma); - if (!inbox) - return -ENOMEM; + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; inbox[0] = cpu_to_be64(virt); inbox[1] = cpu_to_be64(dma_addr); - err = mthca_cmd(dev, indma, 1, 0, CMD_MAP_ICM, CMD_TIME_CLASS_B, status); + err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM, + CMD_TIME_CLASS_B, status); - pci_free_consistent(dev->pdev, 16, inbox, indma); + mthca_free_mailbox(dev, mailbox); if (!err) mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n", @@ -1338,69 +1401,26 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, return 0; } -int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry, +int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int mpt_index, u8 *status) { - dma_addr_t indma; - int err; - - indma = pci_map_single(dev->pdev, mpt_entry, - MTHCA_MPT_ENTRY_SIZE, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(indma)) - return -ENOMEM; - - err = mthca_cmd(dev, indma, mpt_index, 0, 
CMD_SW2HW_MPT, - CMD_TIME_CLASS_B, status); - - pci_unmap_single(dev->pdev, indma, - MTHCA_MPT_ENTRY_SIZE, PCI_DMA_TODEVICE); - return err; + return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT, + CMD_TIME_CLASS_B, status); } -int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry, +int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int mpt_index, u8 *status) { - dma_addr_t outdma = 0; - int err; - - if (mpt_entry) { - outdma = pci_map_single(dev->pdev, mpt_entry, - MTHCA_MPT_ENTRY_SIZE, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(outdma)) - return -ENOMEM; - } - - err = mthca_cmd_box(dev, 0, outdma, mpt_index, !mpt_entry, - CMD_HW2SW_MPT, - CMD_TIME_CLASS_B, status); - - if (mpt_entry) - pci_unmap_single(dev->pdev, outdma, - MTHCA_MPT_ENTRY_SIZE, - PCI_DMA_FROMDEVICE); - return err; + return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, + !mailbox, CMD_HW2SW_MPT, + CMD_TIME_CLASS_B, status); } -int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry, +int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int num_mtt, u8 *status) { - dma_addr_t indma; - int err; - - indma = pci_map_single(dev->pdev, mtt_entry, - (num_mtt + 2) * 8, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(indma)) - return -ENOMEM; - - err = mthca_cmd(dev, indma, num_mtt, 0, CMD_WRITE_MTT, - CMD_TIME_CLASS_B, status); - - pci_unmap_single(dev->pdev, indma, - (num_mtt + 2) * 8, PCI_DMA_TODEVICE); - return err; + return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT, + CMD_TIME_CLASS_B, status); } int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status) @@ -1418,92 +1438,38 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status); } -int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context, +int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int eq_num, u8 *status) { - dma_addr_t indma; - int err; - - indma = pci_map_single(dev->pdev, eq_context, - MTHCA_EQ_CONTEXT_SIZE, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(indma)) - return -ENOMEM; - - err = mthca_cmd(dev, indma, eq_num, 0, CMD_SW2HW_EQ, - CMD_TIME_CLASS_A, status); - - pci_unmap_single(dev->pdev, indma, - MTHCA_EQ_CONTEXT_SIZE, PCI_DMA_TODEVICE); - return err; + return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ, + CMD_TIME_CLASS_A, status); } -int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context, +int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int eq_num, u8 *status) { - dma_addr_t outdma = 0; - int err; - - outdma = pci_map_single(dev->pdev, eq_context, - MTHCA_EQ_CONTEXT_SIZE, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(outdma)) - return -ENOMEM; - - err = mthca_cmd_box(dev, 0, outdma, eq_num, 0, - CMD_HW2SW_EQ, - CMD_TIME_CLASS_A, status); - - pci_unmap_single(dev->pdev, outdma, - MTHCA_EQ_CONTEXT_SIZE, - PCI_DMA_FROMDEVICE); - return err; + return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0, + CMD_HW2SW_EQ, + CMD_TIME_CLASS_A, status); } -int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context, +int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num, u8 *status) { - dma_addr_t indma; - int err; - - indma = pci_map_single(dev->pdev, cq_context, - MTHCA_CQ_CONTEXT_SIZE, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(indma)) - return -ENOMEM; - - err = mthca_cmd(dev, indma, cq_num, 0, CMD_SW2HW_CQ, + return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ, CMD_TIME_CLASS_A, status); - - 
pci_unmap_single(dev->pdev, indma, - MTHCA_CQ_CONTEXT_SIZE, PCI_DMA_TODEVICE); - return err; } -int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context, +int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num, u8 *status) { - dma_addr_t outdma = 0; - int err; - - outdma = pci_map_single(dev->pdev, cq_context, - MTHCA_CQ_CONTEXT_SIZE, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(outdma)) - return -ENOMEM; - - err = mthca_cmd_box(dev, 0, outdma, cq_num, 0, - CMD_HW2SW_CQ, - CMD_TIME_CLASS_A, status); - - pci_unmap_single(dev->pdev, outdma, - MTHCA_CQ_CONTEXT_SIZE, - PCI_DMA_FROMDEVICE); - return err; + return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0, + CMD_HW2SW_CQ, + CMD_TIME_CLASS_A, status); } int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, - int is_ee, void *qp_context, u32 optmask, + int is_ee, struct mthca_mailbox *mailbox, u32 optmask, u8 *status) { static const u16 op[] = { @@ -1520,36 +1486,34 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, [MTHCA_TRANS_ANY2RST] = CMD_ERR2RST_QPEE }; u8 op_mod = 0; - - dma_addr_t indma; + int my_mailbox = 0; int err; if (trans < 0 || trans >= ARRAY_SIZE(op)) return -EINVAL; if (trans == MTHCA_TRANS_ANY2RST) { - indma = 0; op_mod = 3; /* don't write outbox, any->reset */ /* For debugging */ - qp_context = pci_alloc_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE, - &indma); - op_mod = 2; /* write outbox, any->reset */ + if (!mailbox) { + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (!IS_ERR(mailbox)) { + my_mailbox = 1; + op_mod = 2; /* write outbox, any->reset */ + } else + mailbox = NULL; + } } else { - indma = pci_map_single(dev->pdev, qp_context, - MTHCA_QP_CONTEXT_SIZE, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(indma)) - return -ENOMEM; - if (0) { int i; mthca_dbg(dev, "Dumping QP context:\n"); - printk(" opt param mask: %08x\n", be32_to_cpup(qp_context)); + printk(" opt param mask: %08x\n", be32_to_cpup(mailbox->buf)); for (i = 0; i < 0x100 / 4; ++i) { if (i % 8 == 0) printk(" [%02x] ", i * 4); - printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2])); + printk(" %08x", + be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); if ((i + 1) % 8 == 0) printk("\n"); } @@ -1557,55 +1521,39 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, } if (trans == MTHCA_TRANS_ANY2RST) { - err = mthca_cmd_box(dev, 0, indma, (!!is_ee << 24) | num, - op_mod, op[trans], CMD_TIME_CLASS_C, status); + err = mthca_cmd_box(dev, 0, mailbox ? 
mailbox->dma : 0, + (!!is_ee << 24) | num, op_mod, + op[trans], CMD_TIME_CLASS_C, status); - if (0) { + if (0 && mailbox) { int i; mthca_dbg(dev, "Dumping QP context:\n"); - printk(" %08x\n", be32_to_cpup(qp_context)); + printk(" %08x\n", be32_to_cpup(mailbox->buf)); for (i = 0; i < 0x100 / 4; ++i) { if (i % 8 == 0) printk("[%02x] ", i * 4); - printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2])); + printk(" %08x", + be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); if ((i + 1) % 8 == 0) printk("\n"); } } } else - err = mthca_cmd(dev, indma, (!!is_ee << 24) | num, + err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num, op_mod, op[trans], CMD_TIME_CLASS_C, status); - if (trans != MTHCA_TRANS_ANY2RST) - pci_unmap_single(dev->pdev, indma, - MTHCA_QP_CONTEXT_SIZE, PCI_DMA_TODEVICE); - else - pci_free_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE, - qp_context, indma); + if (my_mailbox) + mthca_free_mailbox(dev, mailbox); + return err; } int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, - void *qp_context, u8 *status) + struct mthca_mailbox *mailbox, u8 *status) { - dma_addr_t outdma = 0; - int err; - - outdma = pci_map_single(dev->pdev, qp_context, - MTHCA_QP_CONTEXT_SIZE, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(outdma)) - return -ENOMEM; - - err = mthca_cmd_box(dev, 0, outdma, (!!is_ee << 24) | num, 0, - CMD_QUERY_QPEE, - CMD_TIME_CLASS_A, status); - - pci_unmap_single(dev->pdev, outdma, - MTHCA_QP_CONTEXT_SIZE, - PCI_DMA_FROMDEVICE); - return err; + return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0, + CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status); } int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, @@ -1635,11 +1583,11 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, } int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, - int port, struct ib_wc* in_wc, struct ib_grh* in_grh, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, void *in_mad, void *response_mad, u8 *status) { - void *box; - dma_addr_t dma; + struct mthca_mailbox *inmailbox, *outmailbox; + void *inbox; int err; u32 in_modifier = port; u8 op_modifier = 0; @@ -1653,11 +1601,18 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, #define MAD_IFC_PKEY_OFFSET 0x10e #define MAD_IFC_GRH_OFFSET 0x140 - box = pci_alloc_consistent(dev->pdev, MAD_IFC_BOX_SIZE, &dma); - if (!box) - return -ENOMEM; + inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(inmailbox)) + return PTR_ERR(inmailbox); + inbox = inmailbox->buf; - memcpy(box, in_mad, 256); + outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(outmailbox)) { + mthca_free_mailbox(dev, inmailbox); + return PTR_ERR(outmailbox); + } + + memcpy(inbox, in_mad, 256); /* * Key check traps can't be generated unless we have in_wc to @@ -1671,97 +1626,65 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, if (in_wc) { u8 val; - memset(box + 256, 0, 256); + memset(inbox + 256, 0, 256); - MTHCA_PUT(box, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); - MTHCA_PUT(box, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); + MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); + MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); val = in_wc->sl << 4; - MTHCA_PUT(box, val, MAD_IFC_SL_OFFSET); + MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET); val = in_wc->dlid_path_bits | (in_wc->wc_flags & IB_WC_GRH ? 
0x80 : 0); - MTHCA_PUT(box, val, MAD_IFC_GRH_OFFSET); + MTHCA_PUT(inbox, val, MAD_IFC_GRH_OFFSET); - MTHCA_PUT(box, in_wc->slid, MAD_IFC_RLID_OFFSET); - MTHCA_PUT(box, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET); + MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET); + MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET); if (in_grh) - memcpy((u8 *) box + MAD_IFC_GRH_OFFSET, in_grh, 40); + memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40); op_modifier |= 0x10; in_modifier |= in_wc->slid << 16; } - err = mthca_cmd_box(dev, dma, dma + 512, in_modifier, op_modifier, + err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma, + in_modifier, op_modifier, CMD_MAD_IFC, CMD_TIME_CLASS_C, status); if (!err && !*status) - memcpy(response_mad, box + 512, 256); + memcpy(response_mad, outmailbox->buf, 256); - pci_free_consistent(dev->pdev, MAD_IFC_BOX_SIZE, box, dma); + mthca_free_mailbox(dev, inmailbox); + mthca_free_mailbox(dev, outmailbox); return err; } -int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm, - u8 *status) +int mthca_READ_MGM(struct mthca_dev *dev, int index, + struct mthca_mailbox *mailbox, u8 *status) { - dma_addr_t outdma = 0; - int err; - - outdma = pci_map_single(dev->pdev, mgm, - MTHCA_MGM_ENTRY_SIZE, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(outdma)) - return -ENOMEM; - - err = mthca_cmd_box(dev, 0, outdma, index, 0, - CMD_READ_MGM, - CMD_TIME_CLASS_A, status); - - pci_unmap_single(dev->pdev, outdma, - MTHCA_MGM_ENTRY_SIZE, - PCI_DMA_FROMDEVICE); - return err; + return mthca_cmd_box(dev, 0, mailbox->dma, index, 0, + CMD_READ_MGM, CMD_TIME_CLASS_A, status); } -int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm, - u8 *status) +int mthca_WRITE_MGM(struct mthca_dev *dev, int index, + struct mthca_mailbox *mailbox, u8 *status) { - dma_addr_t indma; - int err; - - indma = pci_map_single(dev->pdev, mgm, - MTHCA_MGM_ENTRY_SIZE, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(indma)) - return -ENOMEM; - - err = mthca_cmd(dev, indma, index, 0, CMD_WRITE_MGM, - CMD_TIME_CLASS_A, status); - - pci_unmap_single(dev->pdev, indma, - MTHCA_MGM_ENTRY_SIZE, PCI_DMA_TODEVICE); - return err; + return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM, + CMD_TIME_CLASS_A, status); } -int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash, - u8 *status) +int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + u16 *hash, u8 *status) { - dma_addr_t indma; u64 imm; int err; - indma = pci_map_single(dev->pdev, gid, 16, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(indma)) - return -ENOMEM; - - err = mthca_cmd_imm(dev, indma, &imm, 0, 0, CMD_MGID_HASH, + err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH, CMD_TIME_CLASS_A, status); - *hash = imm; - pci_unmap_single(dev->pdev, indma, 16, PCI_DMA_TODEVICE); + *hash = imm; return err; } diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h index adf039b..ed517f1 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.h +++ b/drivers/infiniband/hw/mthca/mthca_cmd.h @@ -37,8 +37,7 @@ #include <ib_verbs.h> -#define MTHCA_CMD_MAILBOX_ALIGN 16UL -#define MTHCA_CMD_MAILBOX_EXTRA (MTHCA_CMD_MAILBOX_ALIGN - 1) +#define MTHCA_MAILBOX_SIZE 4096 enum { /* command completed successfully: */ @@ -112,6 +111,11 @@ enum { DEV_LIM_FLAG_UD_MULTI = 1 << 21, }; +struct mthca_mailbox { + dma_addr_t dma; + void *buf; +}; + struct mthca_dev_lim { int max_srq_sz; int max_qp_sz; @@ -235,11 +239,17 @@ struct mthca_set_ib_param { u32 cap_mask; }; +int mthca_cmd_init(struct 
mthca_dev *dev); +void mthca_cmd_cleanup(struct mthca_dev *dev); int mthca_cmd_use_events(struct mthca_dev *dev); void mthca_cmd_use_polling(struct mthca_dev *dev); void mthca_cmd_event(struct mthca_dev *dev, u16 token, u8 status, u64 out_param); +struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, + unsigned int gfp_mask); +void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox); + int mthca_SYS_EN(struct mthca_dev *dev, u8 *status); int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status); int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status); @@ -270,41 +280,39 @@ int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status); int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status); int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, u8 *status); -int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry, +int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int mpt_index, u8 *status); -int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry, +int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int mpt_index, u8 *status); -int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry, +int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int num_mtt, u8 *status); int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status); int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, int eq_num, u8 *status); -int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context, +int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int eq_num, u8 *status); -int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context, +int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int eq_num, u8 *status); -int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context, +int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num, u8 *status); -int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context, +int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num, u8 *status); int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, - int is_ee, void *qp_context, u32 optmask, + int is_ee, struct mthca_mailbox *mailbox, u32 optmask, u8 *status); int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, - void *qp_context, u8 *status); + struct mthca_mailbox *mailbox, u8 *status); int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, u8 *status); int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, - int port, struct ib_wc* in_wc, struct ib_grh* in_grh, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, void *in_mad, void *response_mad, u8 *status); -int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm, - u8 *status); -int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm, - u8 *status); -int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash, - u8 *status); +int mthca_READ_MGM(struct mthca_dev *dev, int index, + struct mthca_mailbox *mailbox, u8 *status); +int mthca_WRITE_MGM(struct mthca_dev *dev, int index, + struct mthca_mailbox *mailbox, u8 *status); +int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + u16 *hash, u8 *status); int mthca_NOP(struct mthca_dev *dev, u8 *status); -#define MAILBOX_ALIGN(x) ((void *) ALIGN((unsigned long) (x), MTHCA_CMD_MAILBOX_ALIGN)) - #endif /* MTHCA_CMD_H */ diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c 
b/drivers/infiniband/hw/mthca/mthca_cq.c index 2bf347b..766e9031 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -171,6 +172,17 @@ static inline void set_cqe_hw(struct mthca_cqe *cqe) cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; } +static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr) +{ + __be32 *cqe = cqe_ptr; + + (void) cqe; /* avoid warning if mthca_dbg compiled away... */ + mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n", + be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), + be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), + be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); +} + /* * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index * should be correct before calling update_cons_index(). @@ -280,16 +292,12 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, int dbd; u32 new_wqe; - if (1 && cqe->syndrome != SYNDROME_WR_FLUSH_ERR) { - int j; - - mthca_dbg(dev, "%x/%d: error CQE -> QPN %06x, WQE @ %08x\n", - cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn), - be32_to_cpu(cqe->wqe)); - - for (j = 0; j < 8; ++j) - printk(KERN_DEBUG " [%2x] %08x\n", - j * 4, be32_to_cpu(((u32 *) cqe)[j])); + if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { + mthca_dbg(dev, "local QP operation err " + "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n", + be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe), + cq->cqn, cq->cons_index); + dump_cqe(dev, cqe); } /* @@ -377,15 +385,6 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, return 0; } -static void dump_cqe(struct mthca_cqe *cqe) -{ - int j; - - for (j = 0; j < 8; ++j) - printk(KERN_DEBUG " [%2x] %08x\n", - j * 4, be32_to_cpu(((u32 *) cqe)[j])); -} - static inline int mthca_poll_one(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp **cur_qp, @@ -414,8 +413,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev, mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n", cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe)); - - dump_cqe(cqe); + dump_cqe(dev, cqe); } is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == @@ -638,19 +636,19 @@ static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq) int size; if (cq->is_direct) - pci_free_consistent(dev->pdev, - (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE, - cq->queue.direct.buf, - pci_unmap_addr(&cq->queue.direct, - mapping)); + dma_free_coherent(&dev->pdev->dev, + (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE, + cq->queue.direct.buf, + pci_unmap_addr(&cq->queue.direct, + mapping)); else { size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE; for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) if (cq->queue.page_list[i].buf) - pci_free_consistent(dev->pdev, PAGE_SIZE, - cq->queue.page_list[i].buf, - pci_unmap_addr(&cq->queue.page_list[i], - mapping)); + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + cq->queue.page_list[i].buf, + pci_unmap_addr(&cq->queue.page_list[i], + mapping)); kfree(cq->queue.page_list); } @@ -670,8 +668,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size, npages = 1; shift = get_order(size) + PAGE_SHIFT; - cq->queue.direct.buf = pci_alloc_consistent(dev->pdev, - size, &t); + 
cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); if (!cq->queue.direct.buf) return -ENOMEM; @@ -709,7 +707,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size, for (i = 0; i < npages; ++i) { cq->queue.page_list[i].buf = - pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t); + dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL); if (!cq->queue.page_list[i].buf) goto err_free; @@ -746,7 +745,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, struct mthca_cq *cq) { int size = nent * MTHCA_CQ_ENTRY_SIZE; - void *mailbox = NULL; + struct mthca_mailbox *mailbox; struct mthca_cq_context *cq_context; int err = -ENOMEM; u8 status; @@ -780,12 +779,11 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, goto err_out_ci; } - mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA, - GFP_KERNEL); - if (!mailbox) - goto err_out_mailbox; + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + goto err_out_arm; - cq_context = MAILBOX_ALIGN(mailbox); + cq_context = mailbox->buf; err = mthca_alloc_cq_buf(dev, size, cq); if (err) @@ -816,7 +814,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, cq_context->state_db = cpu_to_be32(cq->arm_db_index); } - err = mthca_SW2HW_CQ(dev, cq_context, cq->cqn, &status); + err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status); if (err) { mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err); goto err_out_free_mr; @@ -840,7 +838,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, cq->cons_index = 0; - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); return 0; @@ -849,8 +847,9 @@ err_out_free_mr: mthca_free_cq_buf(dev, cq); err_out_mailbox: - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); +err_out_arm: if (mthca_is_memfree(dev)) mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); @@ -870,28 +869,26 @@ err_out: void mthca_free_cq(struct mthca_dev *dev, struct mthca_cq *cq) { - void *mailbox; + struct mthca_mailbox *mailbox; int err; u8 status; might_sleep(); - mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA, - GFP_KERNEL); - if (!mailbox) { + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { mthca_warn(dev, "No memory for mailbox to free CQ.\n"); return; } - err = mthca_HW2SW_CQ(dev, MAILBOX_ALIGN(mailbox), cq->cqn, &status); + err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status); if (err) mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err); else if (status) - mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", - status); + mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status); if (0) { - u32 *ctx = MAILBOX_ALIGN(mailbox); + u32 *ctx = mailbox->buf; int j; printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n", @@ -919,11 +916,11 @@ void mthca_free_cq(struct mthca_dev *dev, if (mthca_is_memfree(dev)) { mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index); - mthca_table_put(dev, dev->cq_table.table, cq->cqn); } + mthca_table_put(dev, dev->cq_table.table, cq->cqn); mthca_free(&dev->cq_table.alloc, cq->cqn); - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); } int __devinit mthca_init_cq_table(struct mthca_dev *dev) diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index e3d79e2..4127f09 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -46,8 +47,8 @@ #define DRV_NAME "ib_mthca" #define PFX DRV_NAME ": " -#define DRV_VERSION "0.06-pre" -#define DRV_RELDATE "November 8, 2004" +#define DRV_VERSION "0.06" +#define DRV_RELDATE "June 23, 2005" enum { MTHCA_FLAG_DDR_HIDDEN = 1 << 1, @@ -98,6 +99,7 @@ enum { }; struct mthca_cmd { + struct pci_pool *pool; int use_events; struct semaphore hcr_sem; struct semaphore poll_sem; @@ -379,6 +381,12 @@ void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar); int mthca_pd_alloc(struct mthca_dev *dev, struct mthca_pd *pd); void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd); +struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size); +void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt); +int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, + int start_index, u64 *buffer_list, int list_len); +int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, + u64 iova, u64 total_size, u32 access, struct mthca_mr *mr); int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, u32 access, struct mthca_mr *mr); int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, diff --git a/drivers/infiniband/hw/mthca/mthca_doorbell.h b/drivers/infiniband/hw/mthca/mthca_doorbell.h index 821039a..535fad7 100644 --- a/drivers/infiniband/hw/mthca/mthca_doorbell.h +++ b/drivers/infiniband/hw/mthca/mthca_doorbell.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index f46d615..cbcf2b4 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c @@ -469,7 +469,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev, PAGE_SIZE; u64 *dma_list = NULL; dma_addr_t t; - void *mailbox = NULL; + struct mthca_mailbox *mailbox; struct mthca_eq_context *eq_context; int err = -ENOMEM; int i; @@ -494,17 +494,16 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev, if (!dma_list) goto err_out_free; - mailbox = kmalloc(sizeof *eq_context + MTHCA_CMD_MAILBOX_EXTRA, - GFP_KERNEL); - if (!mailbox) + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) goto err_out_free; - eq_context = MAILBOX_ALIGN(mailbox); + eq_context = mailbox->buf; for (i = 0; i < npages; ++i) { - eq->page_list[i].buf = pci_alloc_consistent(dev->pdev, - PAGE_SIZE, &t); + eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, + PAGE_SIZE, &t, GFP_KERNEL); if (!eq->page_list[i].buf) - goto err_out_free; + goto err_out_free_pages; dma_list[i] = t; pci_unmap_addr_set(&eq->page_list[i], mapping, t); @@ -517,7 +516,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev, eq->eqn = mthca_alloc(&dev->eq_table.alloc); if (eq->eqn == -1) - goto err_out_free; + goto err_out_free_pages; err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, dma_list, PAGE_SHIFT, npages, @@ -548,7 +547,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev, eq_context->intr = intr; eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey); - err = mthca_SW2HW_EQ(dev, eq_context, eq->eqn, &status); + err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status); if (err) { mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err); goto err_out_free_mr; @@ -561,7 +560,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev, } kfree(dma_list); - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); eq->eqn_mask = swab32(1 << eq->eqn); eq->cons_index = 0; @@ -579,17 +578,19 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev, err_out_free_eq: mthca_free(&dev->eq_table.alloc, eq->eqn); - err_out_free: + err_out_free_pages: for (i = 0; i < npages; ++i) if (eq->page_list[i].buf) - pci_free_consistent(dev->pdev, PAGE_SIZE, - eq->page_list[i].buf, - pci_unmap_addr(&eq->page_list[i], - mapping)); + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + eq->page_list[i].buf, + pci_unmap_addr(&eq->page_list[i], + mapping)); + + mthca_free_mailbox(dev, mailbox); + err_out_free: kfree(eq->page_list); kfree(dma_list); - kfree(mailbox); err_out: return err; @@ -598,25 +599,22 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev, static void mthca_free_eq(struct mthca_dev *dev, struct mthca_eq *eq) { - void *mailbox = NULL; + struct mthca_mailbox *mailbox; int err; u8 status; int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / PAGE_SIZE; int i; - mailbox = kmalloc(sizeof (struct mthca_eq_context) + MTHCA_CMD_MAILBOX_EXTRA, - GFP_KERNEL); - if (!mailbox) + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) return; - err = mthca_HW2SW_EQ(dev, MAILBOX_ALIGN(mailbox), - eq->eqn, &status); + err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status); if (err) mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err); if (status) - mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", - status); + mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status); dev->eq_table.arm_mask &= ~eq->eqn_mask; @@ 
-625,7 +623,7 @@ static void mthca_free_eq(struct mthca_dev *dev, for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) { if (i % 4 == 0) printk("[%02x] ", i * 4); - printk(" %08x", be32_to_cpup(MAILBOX_ALIGN(mailbox) + i * 4)); + printk(" %08x", be32_to_cpup(mailbox->buf + i * 4)); if ((i + 1) % 4 == 0) printk("\n"); } @@ -638,7 +636,7 @@ static void mthca_free_eq(struct mthca_dev *dev, pci_unmap_addr(&eq->page_list[i], mapping)); kfree(eq->page_list); - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); } static void mthca_free_irqs(struct mthca_dev *dev) @@ -709,8 +707,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev) if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) & dev->fw.arbel.eq_arm_base) + 4, 4, &dev->eq_regs.arbel.eq_arm)) { - mthca_err(dev, "Couldn't map interrupt clear register, " - "aborting.\n"); + mthca_err(dev, "Couldn't map EQ arm register, aborting.\n"); mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, dev->clr_base); @@ -721,8 +718,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev) dev->fw.arbel.eq_set_ci_base, MTHCA_EQ_SET_CI_SIZE, &dev->eq_regs.arbel.eq_set_ci_base)) { - mthca_err(dev, "Couldn't map interrupt clear register, " - "aborting.\n"); + mthca_err(dev, "Couldn't map EQ CI register, aborting.\n"); mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) & dev->fw.arbel.eq_arm_base) + 4, 4, dev->eq_regs.arbel.eq_arm); diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index d405903..09519b6 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -69,7 +70,7 @@ MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero"); #endif /* CONFIG_PCI_MSI */ static const char mthca_version[] __devinitdata = - "ib_mthca: Mellanox InfiniBand HCA driver v" + DRV_NAME ": Mellanox InfiniBand HCA driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; static struct mthca_profile default_profile = { @@ -927,13 +928,13 @@ static int __devinit mthca_init_one(struct pci_dev *pdev, */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || pci_resource_len(pdev, 0) != 1 << 20) { - dev_err(&pdev->dev, "Missing DCS, aborting."); + dev_err(&pdev->dev, "Missing DCS, aborting.\n"); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM) || pci_resource_len(pdev, 2) != 1 << 23) { - dev_err(&pdev->dev, "Missing UAR, aborting."); + dev_err(&pdev->dev, "Missing UAR, aborting.\n"); err = -ENODEV; goto err_disable_pdev; } @@ -1004,25 +1005,18 @@ static int __devinit mthca_init_one(struct pci_dev *pdev, !pci_enable_msi(pdev)) mdev->mthca_flags |= MTHCA_FLAG_MSI; - sema_init(&mdev->cmd.hcr_sem, 1); - sema_init(&mdev->cmd.poll_sem, 1); - mdev->cmd.use_events = 0; - - mdev->hcr = ioremap(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE, MTHCA_HCR_SIZE); - if (!mdev->hcr) { - mthca_err(mdev, "Couldn't map command register, " - "aborting.\n"); - err = -ENOMEM; + if (mthca_cmd_init(mdev)) { + mthca_err(mdev, "Failed to init command interface, aborting.\n"); goto err_free_dev; } err = mthca_tune_pci(mdev); if (err) - goto err_iounmap; + goto err_cmd; err = mthca_init_hca(mdev); if (err) - goto err_iounmap; + goto err_cmd; if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) { mthca_warn(mdev, "HCA FW version %x.%x.%x is old (%x.%x.%x is current).\n", @@ -1070,8 +1064,8 @@ err_cleanup: err_close: mthca_close_hca(mdev); -err_iounmap: - iounmap(mdev->hcr); +err_cmd: + mthca_cmd_cleanup(mdev); err_free_dev: if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) @@ -1118,10 +1112,8 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev) iounmap(mdev->kar); mthca_uar_free(mdev, &mdev->driver_uar); mthca_cleanup_uar_table(mdev); - mthca_close_hca(mdev); - - iounmap(mdev->hcr); + mthca_cmd_cleanup(mdev); if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) pci_disable_msix(pdev); @@ -1163,7 +1155,7 @@ static struct pci_device_id mthca_pci_table[] = { MODULE_DEVICE_TABLE(pci, mthca_pci_table); static struct pci_driver mthca_driver = { - .name = "ib_mthca", + .name = DRV_NAME, .id_table = mthca_pci_table, .probe = mthca_init_one, .remove = __devexit_p(mthca_remove_one) diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c index 70a6553..5be7d94 100644 --- a/drivers/infiniband/hw/mthca/mthca_mcg.c +++ b/drivers/infiniband/hw/mthca/mthca_mcg.c @@ -66,22 +66,23 @@ static const u8 zero_gid[16]; /* automatically initialized to 0 */ * entry in hash chain and *mgm holds end of hash chain. 
*/ static int find_mgm(struct mthca_dev *dev, - u8 *gid, struct mthca_mgm *mgm, + u8 *gid, struct mthca_mailbox *mgm_mailbox, u16 *hash, int *prev, int *index) { - void *mailbox; + struct mthca_mailbox *mailbox; + struct mthca_mgm *mgm = mgm_mailbox->buf; u8 *mgid; int err; u8 status; - mailbox = kmalloc(16 + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); - if (!mailbox) + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) return -ENOMEM; - mgid = MAILBOX_ALIGN(mailbox); + mgid = mailbox->buf; memcpy(mgid, gid, 16); - err = mthca_MGID_HASH(dev, mgid, hash, &status); + err = mthca_MGID_HASH(dev, mailbox, hash, &status); if (err) goto out; if (status) { @@ -103,7 +104,7 @@ static int find_mgm(struct mthca_dev *dev, *prev = -1; do { - err = mthca_READ_MGM(dev, *index, mgm, &status); + err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status); if (err) goto out; if (status) { @@ -129,14 +130,14 @@ static int find_mgm(struct mthca_dev *dev, *index = -1; out: - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); return err; } int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mthca_dev *dev = to_mdev(ibqp->device); - void *mailbox; + struct mthca_mailbox *mailbox; struct mthca_mgm *mgm; u16 hash; int index, prev; @@ -145,15 +146,15 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) int err; u8 status; - mailbox = kmalloc(sizeof *mgm + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); - if (!mailbox) - return -ENOMEM; - mgm = MAILBOX_ALIGN(mailbox); + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; if (down_interruptible(&dev->mcg_table.sem)) return -EINTR; - err = find_mgm(dev, gid->raw, mgm, &hash, &prev, &index); + err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); if (err) goto out; @@ -170,7 +171,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) goto out; } - err = mthca_READ_MGM(dev, index, mgm, &status); + err = mthca_READ_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { @@ -195,7 +196,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) goto out; } - err = mthca_WRITE_MGM(dev, index, mgm, &status); + err = mthca_WRITE_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { @@ -206,7 +207,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) if (!link) goto out; - err = mthca_READ_MGM(dev, prev, mgm, &status); + err = mthca_READ_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { @@ -217,7 +218,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) mgm->next_gid_index = cpu_to_be32(index << 5); - err = mthca_WRITE_MGM(dev, prev, mgm, &status); + err = mthca_WRITE_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { @@ -227,14 +228,14 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) out: up(&dev->mcg_table.sem); - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); return err; } int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mthca_dev *dev = to_mdev(ibqp->device); - void *mailbox; + struct mthca_mailbox *mailbox; struct mthca_mgm *mgm; u16 hash; int prev, index; @@ -242,15 +243,15 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) int err; u8 status; - mailbox = kmalloc(sizeof *mgm + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); - if (!mailbox) - return -ENOMEM; - mgm = MAILBOX_ALIGN(mailbox); + mailbox = 
mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; if (down_interruptible(&dev->mcg_table.sem)) return -EINTR; - err = find_mgm(dev, gid->raw, mgm, &hash, &prev, &index); + err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); if (err) goto out; @@ -285,7 +286,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) mgm->qp[loc] = mgm->qp[i - 1]; mgm->qp[i - 1] = 0; - err = mthca_WRITE_MGM(dev, index, mgm, &status); + err = mthca_WRITE_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { @@ -304,7 +305,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) if (be32_to_cpu(mgm->next_gid_index) >> 5) { err = mthca_READ_MGM(dev, be32_to_cpu(mgm->next_gid_index) >> 5, - mgm, &status); + mailbox, &status); if (err) goto out; if (status) { @@ -316,7 +317,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) } else memset(mgm->gid, 0, 16); - err = mthca_WRITE_MGM(dev, index, mgm, &status); + err = mthca_WRITE_MGM(dev, index, mailbox, &status); if (err) goto out; if (status) { @@ -327,7 +328,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) } else { /* Remove entry from AMGM */ index = be32_to_cpu(mgm->next_gid_index) >> 5; - err = mthca_READ_MGM(dev, prev, mgm, &status); + err = mthca_READ_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { @@ -338,7 +339,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) mgm->next_gid_index = cpu_to_be32(index << 5); - err = mthca_WRITE_MGM(dev, prev, mgm, &status); + err = mthca_WRITE_MGM(dev, prev, mailbox, &status); if (err) goto out; if (status) { @@ -350,7 +351,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) out: up(&dev->mcg_table.sem); - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); return err; } diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 637b30e..6d3b05d 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -179,9 +179,14 @@ out: void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) { - int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; + int i; u8 status; + if (!mthca_is_memfree(dev)) + return; + + i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; + down(&table->mutex); if (--table->icm[i]->refcount == 0) { @@ -256,6 +261,9 @@ void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table, { int i; + if (!mthca_is_memfree(dev)) + return; + for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size) mthca_table_put(dev, table, i); } diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index 8960fc2..cbe50fe 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -40,6 +40,12 @@ #include "mthca_cmd.h" #include "mthca_memfree.h" +struct mthca_mtt { + struct mthca_buddy *buddy; + int order; + u32 first_seg; +}; + /* * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 
*/ @@ -173,8 +179,8 @@ static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy) kfree(buddy->bits); } -static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order, - struct mthca_buddy *buddy) +static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order, + struct mthca_buddy *buddy) { u32 seg = mthca_buddy_alloc(buddy, order); @@ -191,14 +197,102 @@ static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order, return seg; } -static void mthca_free_mtt(struct mthca_dev *dev, u32 seg, int order, - struct mthca_buddy* buddy) +static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size, + struct mthca_buddy *buddy) { - mthca_buddy_free(buddy, seg, order); + struct mthca_mtt *mtt; + int i; - if (mthca_is_memfree(dev)) - mthca_table_put_range(dev, dev->mr_table.mtt_table, seg, - seg + (1 << order) - 1); + if (size <= 0) + return ERR_PTR(-EINVAL); + + mtt = kmalloc(sizeof *mtt, GFP_KERNEL); + if (!mtt) + return ERR_PTR(-ENOMEM); + + mtt->buddy = buddy; + mtt->order = 0; + for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1) + ++mtt->order; + + mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy); + if (mtt->first_seg == -1) { + kfree(mtt); + return ERR_PTR(-ENOMEM); + } + + return mtt; +} + +struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size) +{ + return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy); +} + +void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt) +{ + if (!mtt) + return; + + mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order); + + mthca_table_put_range(dev, dev->mr_table.mtt_table, + mtt->first_seg, + mtt->first_seg + (1 << mtt->order) - 1); + + kfree(mtt); +} + +int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, + int start_index, u64 *buffer_list, int list_len) +{ + struct mthca_mailbox *mailbox; + u64 *mtt_entry; + int err = 0; + u8 status; + int i; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mtt_entry = mailbox->buf; + + while (list_len > 0) { + mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base + + mtt->first_seg * MTHCA_MTT_SEG_SIZE + + start_index * 8); + mtt_entry[1] = 0; + for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i) + mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] | + MTHCA_MTT_FLAG_PRESENT); + + /* + * If we have an odd number of entries to write, add + * one more dummy entry for firmware efficiency. 
+ */ + if (i & 1) + mtt_entry[i + 2] = 0; + + err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status); + if (err) { + mthca_warn(dev, "WRITE_MTT failed (%d)\n", err); + goto out; + } + if (status) { + mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n", + status); + err = -EINVAL; + goto out; + } + + list_len -= i; + start_index += i; + buffer_list += i; + } + +out: + mthca_free_mailbox(dev, mailbox); + return err; } static inline u32 tavor_hw_index_to_key(u32 ind) @@ -237,91 +331,18 @@ static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key) return tavor_key_to_hw_index(key); } -int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, - u32 access, struct mthca_mr *mr) +int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, + u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) { - void *mailbox = NULL; + struct mthca_mailbox *mailbox; struct mthca_mpt_entry *mpt_entry; u32 key; + int i; int err; u8 status; might_sleep(); - mr->order = -1; - key = mthca_alloc(&dev->mr_table.mpt_alloc); - if (key == -1) - return -ENOMEM; - mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key); - - if (mthca_is_memfree(dev)) { - err = mthca_table_get(dev, dev->mr_table.mpt_table, key); - if (err) - goto err_out_mpt_free; - } - - mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA, - GFP_KERNEL); - if (!mailbox) { - err = -ENOMEM; - goto err_out_table; - } - mpt_entry = MAILBOX_ALIGN(mailbox); - - mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | - MTHCA_MPT_FLAG_MIO | - MTHCA_MPT_FLAG_PHYSICAL | - MTHCA_MPT_FLAG_REGION | - access); - mpt_entry->page_size = 0; - mpt_entry->key = cpu_to_be32(key); - mpt_entry->pd = cpu_to_be32(pd); - mpt_entry->start = 0; - mpt_entry->length = ~0ULL; - - memset(&mpt_entry->lkey, 0, - sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey)); - - err = mthca_SW2HW_MPT(dev, mpt_entry, - key & (dev->limits.num_mpts - 1), - &status); - if (err) { - mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); - goto err_out_table; - } else if (status) { - mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n", - status); - err = -EINVAL; - goto err_out_table; - } - - kfree(mailbox); - return err; - -err_out_table: - if (mthca_is_memfree(dev)) - mthca_table_put(dev, dev->mr_table.mpt_table, key); - -err_out_mpt_free: - mthca_free(&dev->mr_table.mpt_alloc, key); - kfree(mailbox); - return err; -} - -int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, - u64 *buffer_list, int buffer_size_shift, - int list_len, u64 iova, u64 total_size, - u32 access, struct mthca_mr *mr) -{ - void *mailbox; - u64 *mtt_entry; - struct mthca_mpt_entry *mpt_entry; - u32 key; - int err = -ENOMEM; - u8 status; - int i; - - might_sleep(); WARN_ON(buffer_size_shift >= 32); key = mthca_alloc(&dev->mr_table.mpt_alloc); @@ -335,75 +356,33 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, goto err_out_mpt_free; } - for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0; - i < list_len; - i <<= 1, ++mr->order) - ; /* nothing */ - - mr->first_seg = mthca_alloc_mtt(dev, mr->order, - &dev->mr_table.mtt_buddy); - if (mr->first_seg == -1) + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); goto err_out_table; - - /* - * If list_len is odd, we add one more dummy entry for - * firmware efficiency. 
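
Two details of the mthca_write_mtt() chunking loop above are worth restating: each WRITE_MTT carries at most MTHCA_MAILBOX_SIZE / 8 - 2 entries (two 8-byte slots are reserved for the starting MTT address), and the count handed to firmware is rounded up to an even number, with a zero dummy entry appended when the tail is odd. A small restatement of that arithmetic, with illustrative names:

/* Buffer-list entries that fit in one mailbox-sized WRITE_MTT command. */
#define EXAMPLE_MTT_PER_WRITE	(MTHCA_MAILBOX_SIZE / 8 - 2)

/* Entry count passed to firmware for a chunk holding i real entries. */
static inline int example_write_mtt_count(int i)
{
	return (i + 1) & ~1;	/* odd i: slot i + 2 was zeroed as a dummy */
}
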
- */ - mailbox = kmalloc(max(sizeof *mpt_entry, - (size_t) 8 * (list_len + (list_len & 1) + 2)) + - MTHCA_CMD_MAILBOX_EXTRA, - GFP_KERNEL); - if (!mailbox) - goto err_out_free_mtt; - - mtt_entry = MAILBOX_ALIGN(mailbox); - - mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base + - mr->first_seg * MTHCA_MTT_SEG_SIZE); - mtt_entry[1] = 0; - for (i = 0; i < list_len; ++i) - mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] | - MTHCA_MTT_FLAG_PRESENT); - if (list_len & 1) { - mtt_entry[i + 2] = 0; - ++list_len; - } - - if (0) { - mthca_dbg(dev, "Dumping MPT entry\n"); - for (i = 0; i < list_len + 2; ++i) - printk(KERN_ERR "[%2d] %016llx\n", - i, (unsigned long long) be64_to_cpu(mtt_entry[i])); - } - - err = mthca_WRITE_MTT(dev, mtt_entry, list_len, &status); - if (err) { - mthca_warn(dev, "WRITE_MTT failed (%d)\n", err); - goto err_out_mailbox_free; - } - if (status) { - mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n", - status); - err = -EINVAL; - goto err_out_mailbox_free; } - - mpt_entry = MAILBOX_ALIGN(mailbox); + mpt_entry = mailbox->buf; mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | MTHCA_MPT_FLAG_MIO | MTHCA_MPT_FLAG_REGION | access); + if (!mr->mtt) + mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL); mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12); mpt_entry->key = cpu_to_be32(key); mpt_entry->pd = cpu_to_be32(pd); mpt_entry->start = cpu_to_be64(iova); mpt_entry->length = cpu_to_be64(total_size); + memset(&mpt_entry->lkey, 0, sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey)); - mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + - mr->first_seg * MTHCA_MTT_SEG_SIZE); + + if (mr->mtt) + mpt_entry->mtt_seg = + cpu_to_be64(dev->mr_table.mtt_base + + mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE); if (0) { mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); @@ -416,45 +395,70 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, } } - err = mthca_SW2HW_MPT(dev, mpt_entry, + err = mthca_SW2HW_MPT(dev, mailbox, key & (dev->limits.num_mpts - 1), &status); - if (err) + if (err) { mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); - else if (status) { + goto err_out_mailbox; + } else if (status) { mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n", status); err = -EINVAL; + goto err_out_mailbox; } - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); return err; -err_out_mailbox_free: - kfree(mailbox); - -err_out_free_mtt: - mthca_free_mtt(dev, mr->first_seg, mr->order, &dev->mr_table.mtt_buddy); +err_out_mailbox: + mthca_free_mailbox(dev, mailbox); err_out_table: - if (mthca_is_memfree(dev)) - mthca_table_put(dev, dev->mr_table.mpt_table, key); + mthca_table_put(dev, dev->mr_table.mpt_table, key); err_out_mpt_free: mthca_free(&dev->mr_table.mpt_alloc, key); return err; } -/* Free mr or fmr */ -static void mthca_free_region(struct mthca_dev *dev, u32 lkey, int order, - u32 first_seg, struct mthca_buddy *buddy) +int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, + u32 access, struct mthca_mr *mr) { - if (order >= 0) - mthca_free_mtt(dev, first_seg, order, buddy); + mr->mtt = NULL; + return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr); +} - if (mthca_is_memfree(dev)) - mthca_table_put(dev, dev->mr_table.mpt_table, - arbel_key_to_hw_index(lkey)); +int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, + u64 *buffer_list, int buffer_size_shift, + int list_len, u64 iova, u64 total_size, + u32 access, struct mthca_mr *mr) +{ + int err; + + mr->mtt = mthca_alloc_mtt(dev, list_len); + if (IS_ERR(mr->mtt)) + return PTR_ERR(mr->mtt); + + err = 
mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len); + if (err) { + mthca_free_mtt(dev, mr->mtt); + return err; + } + + err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova, + total_size, access, mr); + if (err) + mthca_free_mtt(dev, mr->mtt); + + return err; +} + +/* Free mr or fmr */ +static void mthca_free_region(struct mthca_dev *dev, u32 lkey) +{ + mthca_table_put(dev, dev->mr_table.mpt_table, + arbel_key_to_hw_index(lkey)); mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); } @@ -476,15 +480,15 @@ void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr) mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n", status); - mthca_free_region(dev, mr->ibmr.lkey, mr->order, mr->first_seg, - &dev->mr_table.mtt_buddy); + mthca_free_region(dev, mr->ibmr.lkey); + mthca_free_mtt(dev, mr->mtt); } int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, u32 access, struct mthca_fmr *mr) { struct mthca_mpt_entry *mpt_entry; - void *mailbox; + struct mthca_mailbox *mailbox; u64 mtt_seg; u32 key, idx; u8 status; @@ -522,31 +526,24 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base + sizeof *(mr->mem.tavor.mpt) * idx; - for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0; - i < list_len; - i <<= 1, ++mr->order) - ; /* nothing */ - - mr->first_seg = mthca_alloc_mtt(dev, mr->order, - dev->mr_table.fmr_mtt_buddy); - if (mr->first_seg == -1) + mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy); + if (IS_ERR(mr->mtt)) goto err_out_table; - mtt_seg = mr->first_seg * MTHCA_MTT_SEG_SIZE; + mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE; if (mthca_is_memfree(dev)) { mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table, - mr->first_seg); + mr->mtt->first_seg); BUG_ON(!mr->mem.arbel.mtts); } else mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg; - mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA, - GFP_KERNEL); - if (!mailbox) + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) goto err_out_free_mtt; - mpt_entry = MAILBOX_ALIGN(mailbox); + mpt_entry = mailbox->buf; mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | MTHCA_MPT_FLAG_MIO | @@ -571,7 +568,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, } } - err = mthca_SW2HW_MPT(dev, mpt_entry, + err = mthca_SW2HW_MPT(dev, mailbox, key & (dev->limits.num_mpts - 1), &status); if (err) { @@ -585,19 +582,17 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, goto err_out_mailbox_free; } - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); return 0; err_out_mailbox_free: - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); err_out_free_mtt: - mthca_free_mtt(dev, mr->first_seg, mr->order, - dev->mr_table.fmr_mtt_buddy); + mthca_free_mtt(dev, mr->mtt); err_out_table: - if (mthca_is_memfree(dev)) - mthca_table_put(dev, dev->mr_table.mpt_table, key); + mthca_table_put(dev, dev->mr_table.mpt_table, key); err_out_mpt_free: mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey); @@ -609,8 +604,9 @@ int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr) if (fmr->maps) return -EBUSY; - mthca_free_region(dev, fmr->ibmr.lkey, fmr->order, fmr->first_seg, - dev->mr_table.fmr_mtt_buddy); + mthca_free_region(dev, fmr->ibmr.lkey); + mthca_free_mtt(dev, fmr->mtt); + return 0; } @@ -826,7 +822,8 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) if (dev->limits.reserved_mtts) { i = fls(dev->limits.reserved_mtts - 1); - if (mthca_alloc_mtt(dev, i, dev->mr_table.fmr_mtt_buddy) == -1) { + if 
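
After the refactor above, registering a translated region is three explicit steps: allocate an MTT sized for the page list, write the DMA addresses into it with mthca_write_mtt(), then create the MPT around it with mthca_mr_alloc(); mthca_mr_alloc_phys() is now just that sequence. A hedged caller-side sketch, assuming the new helpers are exported through the driver headers and using a fixed PAGE_SHIFT page size purely for illustration:

static int example_reg_pages(struct mthca_dev *dev, u32 pd, u32 access,
			     u64 *dma_pages, int npages, u64 iova,
			     struct mthca_mr *mr)
{
	int err;

	mr->mtt = mthca_alloc_mtt(dev, npages);
	if (IS_ERR(mr->mtt))
		return PTR_ERR(mr->mtt);

	err = mthca_write_mtt(dev, mr->mtt, 0, dma_pages, npages);
	if (!err)
		err = mthca_mr_alloc(dev, pd, PAGE_SHIFT, iova,
				     (u64) npages << PAGE_SHIFT, access, mr);
	if (err)
		mthca_free_mtt(dev, mr->mtt);

	return err;
}
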
(mthca_alloc_mtt_range(dev, i, + dev->mr_table.fmr_mtt_buddy) == -1) { mthca_warn(dev, "MTT table of order %d is too small.\n", dev->mr_table.fmr_mtt_buddy->max_order); err = -ENOMEM; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 159f4e6..0b5adfd 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -52,7 +53,7 @@ static int mthca_query_device(struct ib_device *ibdev, if (!in_mad || !out_mad) goto out; - memset(props, 0, sizeof props); + memset(props, 0, sizeof *props); props->fw_ver = mdev->fw_ver; @@ -558,6 +559,7 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, convert_access(acc), mr); if (err) { + kfree(page_list); kfree(mr); return ERR_PTR(err); } diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 619710f..4d976cc 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -54,18 +54,18 @@ struct mthca_uar { int index; }; +struct mthca_mtt; + struct mthca_mr { - struct ib_mr ibmr; - int order; - u32 first_seg; + struct ib_mr ibmr; + struct mthca_mtt *mtt; }; struct mthca_fmr { - struct ib_fmr ibmr; + struct ib_fmr ibmr; struct ib_fmr_attr attr; - int order; - u32 first_seg; - int maps; + struct mthca_mtt *mtt; + int maps; union { struct { struct mthca_mpt_entry __iomem *mpt; diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index ca73bab..163a8ef 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -357,6 +357,9 @@ static const struct { [UD] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY), + [UC] = (IB_QP_PKEY_INDEX | + IB_QP_PORT | + IB_QP_ACCESS_FLAGS), [RC] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), @@ -378,6 +381,9 @@ static const struct { [UD] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY), + [UC] = (IB_QP_PKEY_INDEX | + IB_QP_PORT | + IB_QP_ACCESS_FLAGS), [RC] = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS), @@ -388,6 +394,11 @@ static const struct { [IB_QPS_RTR] = { .trans = MTHCA_TRANS_INIT2RTR, .req_param = { + [UC] = (IB_QP_AV | + IB_QP_PATH_MTU | + IB_QP_DEST_QPN | + IB_QP_RQ_PSN | + IB_QP_MAX_DEST_RD_ATOMIC), [RC] = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | @@ -398,6 +409,9 @@ static const struct { .opt_param = { [UD] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), + [UC] = (IB_QP_ALT_PATH | + IB_QP_ACCESS_FLAGS | + IB_QP_PKEY_INDEX), [RC] = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX), @@ -413,6 +427,8 @@ static const struct { .trans = MTHCA_TRANS_RTR2RTS, .req_param = { [UD] = IB_QP_SQ_PSN, + [UC] = (IB_QP_SQ_PSN | + IB_QP_MAX_QP_RD_ATOMIC), [RC] = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | @@ -423,6 +439,11 @@ static const struct { .opt_param = { [UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), + [UC] = (IB_QP_CUR_STATE | + IB_QP_ALT_PATH | + IB_QP_ACCESS_FLAGS | + IB_QP_PKEY_INDEX | + IB_QP_PATH_MIG_STATE), [RC] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | @@ -442,6 +463,9 @@ static const struct { .opt_param = { [UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), + [UC] = (IB_QP_ACCESS_FLAGS | + IB_QP_ALT_PATH | + 
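
Two small mthca_provider.c fixes ride along in this hunk: memset(props, 0, sizeof props) cleared only a pointer's worth of bytes rather than the whole ib_device_attr, and the phys-MR error path leaked page_list. The first is the classic sizeof mix-up, spelled out here for clarity (the helper name is illustrative):

static void clear_device_attr(struct ib_device_attr *props)
{
	/*
	 * sizeof props is the size of the pointer (4 or 8 bytes);
	 * sizeof *props is the size of the structure, which is what
	 * the corrected code clears.
	 */
	memset(props, 0, sizeof *props);
}
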
IB_QP_PATH_MIG_STATE), [RC] = (IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE | @@ -462,6 +486,10 @@ static const struct { .opt_param = { [UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), + [UC] = (IB_QP_CUR_STATE | + IB_QP_ALT_PATH | + IB_QP_ACCESS_FLAGS | + IB_QP_PATH_MIG_STATE), [RC] = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | @@ -476,6 +504,14 @@ static const struct { .opt_param = { [UD] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), + [UC] = (IB_QP_AV | + IB_QP_MAX_QP_RD_ATOMIC | + IB_QP_MAX_DEST_RD_ATOMIC | + IB_QP_CUR_STATE | + IB_QP_ALT_PATH | + IB_QP_ACCESS_FLAGS | + IB_QP_PKEY_INDEX | + IB_QP_PATH_MIG_STATE), [RC] = (IB_QP_AV | IB_QP_TIMEOUT | IB_QP_RETRY_CNT | @@ -501,6 +537,7 @@ static const struct { .opt_param = { [UD] = (IB_QP_CUR_STATE | IB_QP_QKEY), + [UC] = (IB_QP_CUR_STATE), [RC] = (IB_QP_CUR_STATE | IB_QP_MIN_RNR_TIMER), [MLX] = (IB_QP_CUR_STATE | @@ -552,7 +589,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; - void *mailbox = NULL; + struct mthca_mailbox *mailbox; struct mthca_qp_param *qp_param; struct mthca_qp_context *qp_context; u32 req_param, opt_param; @@ -609,10 +646,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) return -EINVAL; } - mailbox = kmalloc(sizeof (*qp_param) + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); - if (!mailbox) - return -ENOMEM; - qp_param = MAILBOX_ALIGN(mailbox); + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + qp_param = mailbox->buf; qp_context = &qp_param->context; memset(qp_param, 0, sizeof *qp_param); @@ -683,7 +720,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) if (attr_mask & IB_QP_AV) { qp_context->pri_path.g_mylmc = attr->ah_attr.src_path_bits & 0x7f; qp_context->pri_path.rlid = cpu_to_be16(attr->ah_attr.dlid); - qp_context->pri_path.static_rate = (!!attr->ah_attr.static_rate) << 3; + qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate; if (attr->ah_attr.ah_flags & IB_AH_GRH) { qp_context->pri_path.g_mylmc |= 1 << 7; qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index; @@ -724,9 +761,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT); } - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { - qp_context->params1 |= cpu_to_be32(min(attr->max_dest_rd_atomic ? - ffs(attr->max_dest_rd_atomic) - 1 : 0, + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ? + ffs(attr->max_rd_atomic) - 1 : 0, 7) << 21); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX); } @@ -764,10 +801,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) qp->atomic_rd_en = attr->qp_access_flags; } - if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { u8 rra_max; - if (qp->resp_depth && !attr->max_rd_atomic) { + if (qp->resp_depth && !attr->max_dest_rd_atomic) { /* * Lowering our responder resources to zero. * Turn off RDMA/atomics as responder. @@ -778,7 +815,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) MTHCA_QP_OPTPAR_RAE); } - if (!qp->resp_depth && attr->max_rd_atomic) { + if (!qp->resp_depth && attr->max_dest_rd_atomic) { /* * Increasing our responder resources from * zero. 
Turn on RDMA/atomics as appropriate. @@ -799,7 +836,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) } for (rra_max = 0; - 1 << rra_max < attr->max_rd_atomic && + 1 << rra_max < attr->max_dest_rd_atomic && rra_max < dev->qp_table.rdb_shift; ++rra_max) ; /* nothing */ @@ -807,7 +844,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) qp_context->params2 |= cpu_to_be32(rra_max << 21); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX); - qp->resp_depth = attr->max_rd_atomic; + qp->resp_depth = attr->max_dest_rd_atomic; } qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); @@ -835,7 +872,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) } err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, - qp->qpn, 0, qp_param, 0, &status); + qp->qpn, 0, mailbox, 0, &status); if (status) { mthca_warn(dev, "modify QP %d returned status %02x.\n", state_table[cur_state][new_state].trans, status); @@ -845,7 +882,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) if (!err) qp->state = new_state; - kfree(mailbox); + mthca_free_mailbox(dev, mailbox); if (is_sqp(dev, qp)) store_attrs(to_msqp(qp), attr, attr_mask); @@ -934,7 +971,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n", size, shift); - qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t); + qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size, + &t, GFP_KERNEL); if (!qp->queue.direct.buf) goto err_out; @@ -973,7 +1011,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, for (i = 0; i < npages; ++i) { qp->queue.page_list[i].buf = - pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t); + dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL); if (!qp->queue.page_list[i].buf) goto err_out_free; @@ -996,16 +1035,15 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, err_out_free: if (qp->is_direct) { - pci_free_consistent(dev->pdev, size, - qp->queue.direct.buf, - pci_unmap_addr(&qp->queue.direct, mapping)); + dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf, + pci_unmap_addr(&qp->queue.direct, mapping)); } else for (i = 0; i < npages; ++i) { if (qp->queue.page_list[i].buf) - pci_free_consistent(dev->pdev, PAGE_SIZE, - qp->queue.page_list[i].buf, - pci_unmap_addr(&qp->queue.page_list[i], - mapping)); + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + qp->queue.page_list[i].buf, + pci_unmap_addr(&qp->queue.page_list[i], + mapping)); } @@ -1073,11 +1111,12 @@ static void mthca_free_memfree(struct mthca_dev *dev, if (mthca_is_memfree(dev)) { mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); - mthca_table_put(dev, dev->qp_table.rdb_table, - qp->qpn << dev->qp_table.rdb_shift); - mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); - mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); } + + mthca_table_put(dev, dev->qp_table.rdb_table, + qp->qpn << dev->qp_table.rdb_shift); + mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); + mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); } static void mthca_wq_init(struct mthca_wq* wq) @@ -1529,6 +1568,26 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; + case UC: + switch (wr->opcode) { + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + ((struct mthca_raddr_seg *) wqe)->raddr = + cpu_to_be64(wr->wr.rdma.remote_addr); + ((struct 
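
The responder-resources update above now keys off IB_QP_MAX_DEST_RD_ATOMIC (the initiator depth moved to the IB_QP_MAX_QP_RD_ATOMIC hunk earlier), and rounds the requested depth up to a power of two capped by the RDB table shift. The same loop pulled out as a helper, with an illustrative name:

/* Smallest rra_max with 1 << rra_max >= wanted, capped at rdb_shift. */
static u8 example_rra_max(int wanted, int rdb_shift)
{
	u8 rra_max;

	for (rra_max = 0;
	     1 << rra_max < wanted && rra_max < rdb_shift;
	     ++rra_max)
		; /* nothing */

	return rra_max;
}
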
mthca_raddr_seg *) wqe)->rkey = + cpu_to_be32(wr->wr.rdma.rkey); + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + wqe += sizeof (struct mthca_raddr_seg); + size += sizeof (struct mthca_raddr_seg) / 16; + break; + + default: + /* No extra segments required for sends */ + break; + } + + break; + case UD: ((struct mthca_tavor_ud_seg *) wqe)->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key); @@ -1814,9 +1873,29 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, sizeof (struct mthca_atomic_seg); break; + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + ((struct mthca_raddr_seg *) wqe)->raddr = + cpu_to_be64(wr->wr.rdma.remote_addr); + ((struct mthca_raddr_seg *) wqe)->rkey = + cpu_to_be32(wr->wr.rdma.rkey); + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + wqe += sizeof (struct mthca_raddr_seg); + size += sizeof (struct mthca_raddr_seg) / 16; + break; + + default: + /* No extra segments required for sends */ + break; + } + + break; + + case UC: + switch (wr->opcode) { case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: - case IB_WR_RDMA_READ: ((struct mthca_raddr_seg *) wqe)->raddr = cpu_to_be64(wr->wr.rdma.remote_addr); ((struct mthca_raddr_seg *) wqe)->rkey = |
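
The new UC rows in the QP state table determine what a verbs consumer must supply at each transition. A consumer-side sketch of driving a UC QP from RESET through INIT and RTR to RTS with exactly those required attributes plus IB_QP_STATE; the port number, MTU, PSNs and RD-atomic depths are placeholder values, and the header path varies by tree:

#include <rdma/ib_verbs.h>	/* plain <ib_verbs.h> in trees of this vintage */
#include <linux/string.h>

static int example_bring_up_uc_qp(struct ib_qp *qp, struct ib_ah_attr *ah,
				  u32 remote_qpn, u32 rq_psn, u32 sq_psn)
{
	struct ib_qp_attr attr;
	int err;

	memset(&attr, 0, sizeof attr);
	attr.qp_state	     = IB_QPS_INIT;
	attr.pkey_index	     = 0;
	attr.port_num	     = 1;
	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
	if (err)
		return err;

	memset(&attr, 0, sizeof attr);
	attr.qp_state		= IB_QPS_RTR;
	attr.ah_attr		= *ah;
	attr.path_mtu		= IB_MTU_1024;
	attr.dest_qp_num	= remote_qpn;
	attr.rq_psn		= rq_psn;
	attr.max_dest_rd_atomic	= 1;
	err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV |
			   IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN |
			   IB_QP_MAX_DEST_RD_ATOMIC);
	if (err)
		return err;

	memset(&attr, 0, sizeof attr);
	attr.qp_state	   = IB_QPS_RTS;
	attr.sq_psn	   = sq_psn;
	attr.max_rd_atomic = 1;
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN |
			    IB_QP_MAX_QP_RD_ATOMIC);
}
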