Diffstat (limited to 'drivers/infiniband')
50 files changed, 470 insertions, 241 deletions
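The headline interface change in the diff below: ib_create_cq() and every driver's create_cq method gain a completion-vector argument, and struct ib_device grows a num_comp_vectors field that uverbs now reports instead of a hard-coded 1. A minimal sketch of a call under the updated signatures; the handler, context, and hint names are hypothetical:

#include <rdma/ib_verbs.h>

static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
	/* kick a polling context (tasklet, NAPI poll, ...) from here */
}

static struct ib_cq *example_create_cq(struct ib_device *device,
				       void *ctx, int cqe, int hint)
{
	int vector = 0;

	/*
	 * Spread CQs across vectors when the HCA exposes several; every
	 * driver touched below still reports num_comp_vectors = 1, so
	 * vector 0 is always a safe choice.
	 */
	if (device->num_comp_vectors > 1)
		vector = hint % device->num_comp_vectors;

	return ib_create_cq(device, example_comp_handler,
			    NULL /* async event handler */, ctx, cqe, vector);
}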
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 842cd0b..eff591d 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -40,7 +40,6 @@ #include <linux/err.h> #include <linux/idr.h> #include <linux/interrupt.h> -#include <linux/pci.h> #include <linux/random.h> #include <linux/rbtree.h> #include <linux/spinlock.h> diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 1d796e7..a06bcc6 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c @@ -43,6 +43,8 @@ #include "core_priv.h" +#define PFX "fmr_pool: " + enum { IB_FMR_MAX_REMAPS = 32, @@ -150,7 +152,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool) #ifdef DEBUG if (fmr->ref_count !=0) { - printk(KERN_WARNING "Unmapping FMR 0x%08x with ref count %d", + printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d", fmr, fmr->ref_count); } #endif @@ -168,7 +170,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool) ret = ib_unmap_fmr(&fmr_list); if (ret) - printk(KERN_WARNING "ib_unmap_fmr returned %d", ret); + printk(KERN_WARNING PFX "ib_unmap_fmr returned %d", ret); spin_lock_irq(&pool->pool_lock); list_splice(&unmap_list, &pool->free_list); @@ -226,20 +228,20 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, device = pd->device; if (!device->alloc_fmr || !device->dealloc_fmr || !device->map_phys_fmr || !device->unmap_fmr) { - printk(KERN_WARNING "Device %s does not support fast memory regions", + printk(KERN_INFO PFX "Device %s does not support FMRs\n", device->name); return ERR_PTR(-ENOSYS); } attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) { - printk(KERN_WARNING "couldn't allocate device attr struct"); + printk(KERN_WARNING PFX "couldn't allocate device attr struct"); return ERR_PTR(-ENOMEM); } ret = ib_query_device(device, attr); if (ret) { - printk(KERN_WARNING "couldn't query device"); + printk(KERN_WARNING PFX "couldn't query device: %d", ret); kfree(attr); return ERR_PTR(ret); } @@ -253,7 +255,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, pool = kmalloc(sizeof *pool, GFP_KERNEL); if (!pool) { - printk(KERN_WARNING "couldn't allocate pool struct"); + printk(KERN_WARNING PFX "couldn't allocate pool struct"); return ERR_PTR(-ENOMEM); } @@ -270,7 +272,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket, GFP_KERNEL); if (!pool->cache_bucket) { - printk(KERN_WARNING "Failed to allocate cache in pool"); + printk(KERN_WARNING PFX "Failed to allocate cache in pool"); ret = -ENOMEM; goto out_free_pool; } @@ -294,7 +296,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, "ib_fmr(%s)", device->name); if (IS_ERR(pool->thread)) { - printk(KERN_WARNING "couldn't start cleanup thread"); + printk(KERN_WARNING PFX "couldn't start cleanup thread"); ret = PTR_ERR(pool->thread); goto out_free_pool; } @@ -311,8 +313,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64), GFP_KERNEL); if (!fmr) { - printk(KERN_WARNING "failed to allocate fmr struct " - "for FMR %d", i); + printk(KERN_WARNING PFX "failed to allocate fmr " + "struct for FMR %d", i); goto out_fail; } @@ -323,7 +325,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr); if (IS_ERR(fmr->fmr)) { - printk(KERN_WARNING "fmr_create failed for FMR %d", i); + printk(KERN_WARNING PFX "fmr_create failed " + "for 
FMR %d", i); kfree(fmr); goto out_fail; } @@ -378,7 +381,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) } if (i < pool->pool_size) - printk(KERN_WARNING "pool still has %d regions registered", + printk(KERN_WARNING PFX "pool still has %d regions registered", pool->pool_size - i); kfree(pool->cache_bucket); @@ -463,8 +466,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, list_add(&fmr->list, &pool->free_list); spin_unlock_irqrestore(&pool->pool_lock, flags); - printk(KERN_WARNING "fmr_map returns %d\n", - result); + printk(KERN_WARNING PFX "fmr_map returns %d\n", result); return ERR_PTR(result); } @@ -516,7 +518,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) #ifdef DEBUG if (fmr->ref_count < 0) - printk(KERN_WARNING "FMR %p has ref count %d < 0", + printk(KERN_WARNING PFX "FMR %p has ref count %d < 0", fmr, fmr->ref_count); #endif diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 891d1fa..223b1aa 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -39,7 +39,6 @@ #include <linux/err.h> #include <linux/idr.h> #include <linux/interrupt.h> -#include <linux/pci.h> #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/workqueue.h> diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 6edfecf..85ccf13 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2771,7 +2771,7 @@ static int ib_mad_port_open(struct ib_device *device, cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2; port_priv->cq = ib_create_cq(port_priv->device, ib_mad_thread_completion_handler, - NULL, port_priv, cq_size); + NULL, port_priv, cq_size, 0); if (IS_ERR(port_priv->cq)) { printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index de89717..9be5cc0 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h @@ -39,7 +39,6 @@ #include <linux/completion.h> #include <linux/err.h> -#include <linux/pci.h> #include <linux/workqueue.h> #include <rdma/ib_mad.h> #include <rdma/ib_smi.h> diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 4a579b3..1e13ab4 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -34,7 +34,6 @@ #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> -#include <linux/pci.h> #include <linux/bitops.h> #include <linux/random.h> diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 9a7eaad..6469406 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -40,7 +40,6 @@ #include <linux/random.h> #include <linux/spinlock.h> #include <linux/slab.h> -#include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/kref.h> #include <linux/idr.h> diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 8199b83..d97ded2 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -40,7 +40,6 @@ #include <linux/err.h> #include <linux/fs.h> #include <linux/cdev.h> -#include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/poll.h> #include <linux/rwsem.h> diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 4fd75af..bab6676 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ 
b/drivers/infiniband/core/uverbs_cmd.c @@ -802,6 +802,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, INIT_LIST_HEAD(&obj->async_list); cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, + cmd.comp_vector, file->ucontext, &udata); if (IS_ERR(cq)) { ret = PTR_ERR(cq); diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index f8bc822..d44e547 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -752,7 +752,7 @@ static void ib_uverbs_add_one(struct ib_device *device) spin_unlock(&map_lock); uverbs_dev->ib_dev = device; - uverbs_dev->num_comp_vectors = 1; + uverbs_dev->num_comp_vectors = device->num_comp_vectors; uverbs_dev->dev = cdev_alloc(); if (!uverbs_dev->dev) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index ccdf93d..86ed8af 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -609,11 +609,11 @@ EXPORT_SYMBOL(ib_destroy_qp); struct ib_cq *ib_create_cq(struct ib_device *device, ib_comp_handler comp_handler, void (*event_handler)(struct ib_event *, void *), - void *cq_context, int cqe) + void *cq_context, int cqe, int comp_vector) { struct ib_cq *cq; - cq = device->create_cq(device, cqe, NULL, NULL); + cq = device->create_cq(device, cqe, comp_vector, NULL, NULL); if (!IS_ERR(cq)) { cq->device = device; diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h index 04a9db5..fa58200 100644 --- a/drivers/infiniband/hw/amso1100/c2.h +++ b/drivers/infiniband/hw/amso1100/c2.h @@ -519,7 +519,7 @@ extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq); extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index); extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index); extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); -extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify); +extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); /* CM */ extern int c2_llp_connect(struct iw_cm_id *cm_id, diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c index 5175c99..d2b3366 100644 --- a/drivers/infiniband/hw/amso1100/c2_cq.c +++ b/drivers/infiniband/hw/amso1100/c2_cq.c @@ -217,17 +217,19 @@ int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) return npolled; } -int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) { struct c2_mq_shared __iomem *shared; struct c2_cq *cq; + unsigned long flags; + int ret = 0; cq = to_c2cq(ibcq); shared = cq->mq.peer; - if (notify == IB_CQ_NEXT_COMP) + if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP) writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type); - else if (notify == IB_CQ_SOLICITED) + else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type); else return -EINVAL; @@ -241,7 +243,13 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) */ readb(&shared->armed); - return 0; + if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) { + spin_lock_irqsave(&cq->lock, flags); + ret = !c2_mq_empty(&cq->mq); + spin_unlock_irqrestore(&cq->lock, flags); + } + + return ret; } static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index 
607c09b..1091662 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -290,7 +290,7 @@ static int c2_destroy_qp(struct ib_qp *ib_qp) return 0; } -static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, +static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *context, struct ib_udata *udata) { @@ -795,6 +795,7 @@ int c2_register_device(struct c2_dev *dev) memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6); dev->ibdev.phys_port_cnt = 1; + dev->ibdev.num_comp_vectors = 1; dev->ibdev.dma_device = &dev->pcidev->dev; dev->ibdev.query_device = c2_query_device; dev->ibdev.query_port = c2_query_port; diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index f5e9aee..76049af 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -114,7 +114,10 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq, return -EIO; } } + + return 1; } + return 0; } diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h index 90d7b89..ff7290e 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_wr.h +++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h @@ -38,6 +38,7 @@ #include "firmware_exports.h" #define T3_MAX_SGE 4 +#define T3_MAX_INLINE 64 #define Q_EMPTY(rptr,wptr) ((rptr)==(wptr)) #define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \ diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 3b4b0ac..b2faff5 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1109,6 +1109,15 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) PDBG("%s ep %p\n", __FUNCTION__, ep); + /* + * We get 2 abort replies from the HW. The first one must + * be ignored except for scribbling that we need one more. + */ + if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) { + ep->flags |= ABORT_REQ_IN_PROGRESS; + return CPL_RET_BUF_DONE; + } + close_complete_upcall(ep); state_set(&ep->com, DEAD); release_ep_resources(ep); @@ -1189,6 +1198,7 @@ static int listen_stop(struct iwch_listen_ep *ep) } req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + req->cpu_idx = 0; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); skb->priority = 1; ep->com.tdev->send(ep->com.tdev, skb); @@ -1475,6 +1485,15 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) int ret; int state; + /* + * We get 2 peer aborts from the HW. The first one must + * be ignored except for scribbling that we need one more. 
+ */ + if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) { + ep->flags |= PEER_ABORT_IN_PROGRESS; + return CPL_RET_BUF_DONE; + } + if (is_neg_adv_abort(req->status)) { PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid); diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h index 0c6f281..21a388c 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.h +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h @@ -143,6 +143,11 @@ enum iwch_ep_state { DEAD, }; +enum iwch_ep_flags { + PEER_ABORT_IN_PROGRESS = (1 << 0), + ABORT_REQ_IN_PROGRESS = (1 << 1), +}; + struct iwch_ep_common { struct iw_cm_id *cm_id; struct iwch_qp *qp; @@ -181,6 +186,7 @@ struct iwch_ep { u16 plen; u32 ird; u32 ord; + u32 flags; }; static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id) diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index af28a31..a891493 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -139,7 +139,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq) return 0; } -static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, +static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *ib_context, struct ib_udata *udata) { @@ -292,7 +292,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) #endif } -static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct iwch_dev *rhp; struct iwch_cq *chp; @@ -303,7 +303,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) chp = to_iwch_cq(ibcq); rhp = chp->rhp; - if (notify == IB_CQ_SOLICITED) + if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) cq_op = CQ_ARM_SE; else cq_op = CQ_ARM_AN; @@ -317,9 +317,11 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr); err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0); spin_unlock_irqrestore(&chp->lock, flag); - if (err) + if (err < 0) printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err, chp->cq.cqid); + if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS)) + err = 0; return err; } @@ -780,6 +782,9 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd, if (rqsize > T3_MAX_RQ_SIZE) return ERR_PTR(-EINVAL); + if (attrs->cap.max_inline_data > T3_MAX_INLINE) + return ERR_PTR(-EINVAL); + /* * NOTE: The SQ and total WQ sizes don't need to be * a power of two. 
However, all the code assumes @@ -1107,6 +1112,7 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.node_type = RDMA_NODE_RNIC; memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC)); dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports; + dev->ibdev.num_comp_vectors = 1; dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); dev->ibdev.query_device = iwch_query_device; dev->ibdev.query_port = iwch_query_port; diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index 0a472c9..714dddb 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -471,43 +471,62 @@ int iwch_bind_mw(struct ib_qp *qp, return err; } -static void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, int tagged) +static inline void build_term_codes(struct respQ_msg_t *rsp_msg, + u8 *layer_type, u8 *ecode) { - switch (t3err) { + int status = TPT_ERR_INTERNAL_ERR; + int tagged = 0; + int opcode = -1; + int rqtype = 0; + int send_inv = 0; + + if (rsp_msg) { + status = CQE_STATUS(rsp_msg->cqe); + opcode = CQE_OPCODE(rsp_msg->cqe); + rqtype = RQ_TYPE(rsp_msg->cqe); + send_inv = (opcode == T3_SEND_WITH_INV) || + (opcode == T3_SEND_WITH_SE_INV); + tagged = (opcode == T3_RDMA_WRITE) || + (rqtype && (opcode == T3_READ_RESP)); + } + + switch (status) { case TPT_ERR_STAG: - if (tagged == 1) { - *layer_type = LAYER_DDP|DDP_TAGGED_ERR; - *ecode = DDPT_INV_STAG; - } else if (tagged == 2) { + if (send_inv) { + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; + *ecode = RDMAP_CANT_INV_STAG; + } else { *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_INV_STAG; } break; case TPT_ERR_PDID: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + if ((opcode == T3_SEND_WITH_INV) || + (opcode == T3_SEND_WITH_SE_INV)) + *ecode = RDMAP_CANT_INV_STAG; + else + *ecode = RDMAP_STAG_NOT_ASSOC; + break; case TPT_ERR_QPID: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + *ecode = RDMAP_STAG_NOT_ASSOC; + break; case TPT_ERR_ACCESS: - if (tagged == 1) { - *layer_type = LAYER_DDP|DDP_TAGGED_ERR; - *ecode = DDPT_STAG_NOT_ASSOC; - } else if (tagged == 2) { - *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; - *ecode = RDMAP_STAG_NOT_ASSOC; - } + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + *ecode = RDMAP_ACC_VIOL; break; case TPT_ERR_WRAP: *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_TO_WRAP; break; case TPT_ERR_BOUND: - if (tagged == 1) { + if (tagged) { *layer_type = LAYER_DDP|DDP_TAGGED_ERR; *ecode = DDPT_BASE_BOUNDS; - } else if (tagged == 2) { + } else { *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; *ecode = RDMAP_BASE_BOUNDS; - } else { - *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; - *ecode = DDPU_MSG_TOOBIG; } break; case TPT_ERR_INVALIDATE_SHARED_MR: @@ -591,8 +610,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) { union t3_wr *wqe; struct terminate_message *term; - int status; - int tagged = 0; struct sk_buff *skb; PDBG("%s %d\n", __FUNCTION__, __LINE__); @@ -610,17 +627,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) /* immediate data starts here. 
*/ term = (struct terminate_message *)wqe->send.sgl; - if (rsp_msg) { - status = CQE_STATUS(rsp_msg->cqe); - if (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE) - tagged = 1; - if ((CQE_OPCODE(rsp_msg->cqe) == T3_READ_REQ) || - (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) - tagged = 2; - } else { - status = TPT_ERR_INTERNAL_ERR; - } - build_term_codes(status, &term->layer_etype, &term->ecode, tagged); + build_term_codes(rsp_msg, &term->layer_etype, &term->ecode); build_fw_riwrh((void *)wqe, T3_WR_SEND, T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 1, qhp->ep->hwtid, 5); diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c index e2cdc1a..67f0670 100644 --- a/drivers/infiniband/hw/ehca/ehca_cq.c +++ b/drivers/infiniband/hw/ehca/ehca_cq.c @@ -113,7 +113,7 @@ struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num) return ret; } -struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, +struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) { diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h index 95fd59f..e14b029 100644 --- a/drivers/infiniband/hw/ehca/ehca_iverbs.h +++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h @@ -123,7 +123,7 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq); void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq); -struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, +struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata); @@ -135,7 +135,7 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); int ehca_peek_cq(struct ib_cq *cq, int wc_cnt); -int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify); +int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags); struct ib_qp *ehca_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 4700085..2d37054 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -313,6 +313,7 @@ int ehca_init_device(struct ehca_shca *shca) shca->ib_device.node_type = RDMA_NODE_IB_CA; shca->ib_device.phys_port_cnt = shca->num_ports; + shca->ib_device.num_comp_vectors = 1; shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev; shca->ib_device.query_device = ehca_query_device; shca->ib_device.query_port = ehca_query_port; @@ -375,7 +376,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port) return -EPERM; } - ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10); + ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0); if (IS_ERR(ibcq)) { ehca_err(&shca->ib_device, "Cannot create AQP1 CQ."); return PTR_ERR(ibcq); diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index 08d3f89..caec9de 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c @@ -634,11 +634,13 @@ poll_cq_exit0: return ret; } -int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify) +int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags) { struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); + unsigned long spl_flags; + int ret = 0; - switch (cq_notify) { + switch (notify_flags & IB_CQ_SOLICITED_MASK) { case IB_CQ_SOLICITED: 
hipz_set_cqx_n0(my_cq, 1); break; @@ -649,5 +651,11 @@ int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify) return -EINVAL; } - return 0; + if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) { + spin_lock_irqsave(&my_cq->spinlock, spl_flags); + ret = ipz_qeit_is_valid(&my_cq->ipz_queue); + spin_unlock_irqrestore(&my_cq->spinlock, spl_flags); + } + + return ret; } diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h index 8199c45..57f141a 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h @@ -140,6 +140,14 @@ static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue) return cqe; } +static inline int ipz_qeit_is_valid(struct ipz_queue *queue) +{ + struct ehca_cqe *cqe = ipz_qeit_get(queue); + u32 cqe_flags = cqe->cqe_flags; + + return cqe_flags >> 7 == (queue->toggle_state & 1); +} + /* * returns and resets Queue Entry iterator * returns address (kv) of first Queue Entry diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c index ea78e6d..3e9241b 100644 --- a/drivers/infiniband/hw/ipath/ipath_cq.c +++ b/drivers/infiniband/hw/ipath/ipath_cq.c @@ -204,7 +204,7 @@ static void send_complete(unsigned long data) * * Called by ib_create_cq() in the generic verbs code. */ -struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, +struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) { @@ -243,33 +243,21 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, * See ipath_mmap() for details. */ if (udata && udata->outlen >= sizeof(__u64)) { - struct ipath_mmap_info *ip; - __u64 offset = (__u64) wc; int err; + u32 s = sizeof *wc + sizeof(struct ib_wc) * entries; - err = ib_copy_to_udata(udata, &offset, sizeof(offset)); - if (err) { - ret = ERR_PTR(err); + cq->ip = ipath_create_mmap_info(dev, s, context, wc); + if (!cq->ip) { + ret = ERR_PTR(-ENOMEM); goto bail_wc; } - /* Allocate info for ipath_mmap(). */ - ip = kmalloc(sizeof(*ip), GFP_KERNEL); - if (!ip) { - ret = ERR_PTR(-ENOMEM); - goto bail_wc; + err = ib_copy_to_udata(udata, &cq->ip->offset, + sizeof(cq->ip->offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_ip; } - cq->ip = ip; - ip->context = context; - ip->obj = wc; - kref_init(&ip->ref); - ip->mmap_cnt = 0; - ip->size = PAGE_ALIGN(sizeof(*wc) + - sizeof(struct ib_wc) * entries); - spin_lock_irq(&dev->pending_lock); - ip->next = dev->pending_mmaps; - dev->pending_mmaps = ip; - spin_unlock_irq(&dev->pending_lock); } else cq->ip = NULL; @@ -277,12 +265,18 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, if (dev->n_cqs_allocated == ib_ipath_max_cqs) { spin_unlock(&dev->n_cqs_lock); ret = ERR_PTR(-ENOMEM); - goto bail_wc; + goto bail_ip; } dev->n_cqs_allocated++; spin_unlock(&dev->n_cqs_lock); + if (cq->ip) { + spin_lock_irq(&dev->pending_lock); + list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + /* * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. 
* The number of entries should be >= the number requested or return @@ -301,12 +295,12 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, goto done; +bail_ip: + kfree(cq->ip); bail_wc: vfree(wc); - bail_cq: kfree(cq); - done: return ret; } @@ -340,17 +334,18 @@ int ipath_destroy_cq(struct ib_cq *ibcq) /** * ipath_req_notify_cq - change the notification type for a completion queue * @ibcq: the completion queue - * @notify: the type of notification to request + * @notify_flags: the type of notification to request * * Returns 0 for success. * * This may be called from interrupt context. Also called by * ib_req_notify_cq() in the generic verbs code. */ -int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) { struct ipath_cq *cq = to_icq(ibcq); unsigned long flags; + int ret = 0; spin_lock_irqsave(&cq->lock, flags); /* @@ -358,9 +353,15 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2). */ if (cq->notify != IB_CQ_NEXT_COMP) - cq->notify = notify; + cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; + + if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && + cq->queue->head != cq->queue->tail) + ret = 1; + spin_unlock_irqrestore(&cq->lock, flags); - return 0; + + return ret; } /** @@ -443,13 +444,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) if (cq->ip) { struct ipath_ibdev *dev = to_idev(ibcq->device); struct ipath_mmap_info *ip = cq->ip; + u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe; - ip->obj = wc; - ip->size = PAGE_ALIGN(sizeof(*wc) + - sizeof(struct ib_wc) * cqe); + ipath_update_mmap_info(dev, ip, s, wc); spin_lock_irq(&dev->pending_lock); - ip->next = dev->pending_mmaps; - dev->pending_mmaps = ip; + if (list_empty(&ip->pending_mmaps)) + list_add(&ip->pending_mmaps, &dev->pending_mmaps); spin_unlock_irq(&dev->pending_lock); } diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index ed55979..036ed1e 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -38,7 +38,6 @@ #include <linux/pagemap.h> #include <linux/init.h> #include <linux/namei.h> -#include <linux/pci.h> #include "ipath_kernel.h" diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c index e46aa4e..05a1d2b 100644 --- a/drivers/infiniband/hw/ipath/ipath_layer.c +++ b/drivers/infiniband/hw/ipath/ipath_layer.c @@ -37,7 +37,6 @@ */ #include <linux/io.h> -#include <linux/pci.h> #include <asm/byteorder.h> #include "ipath_kernel.h" diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c index a82157d..937bc33 100644 --- a/drivers/infiniband/hw/ipath/ipath_mmap.c +++ b/drivers/infiniband/hw/ipath/ipath_mmap.c @@ -46,6 +46,11 @@ void ipath_release_mmap_info(struct kref *ref) { struct ipath_mmap_info *ip = container_of(ref, struct ipath_mmap_info, ref); + struct ipath_ibdev *dev = to_idev(ip->context->device); + + spin_lock_irq(&dev->pending_lock); + list_del(&ip->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); vfree(ip->obj); kfree(ip); @@ -60,14 +65,12 @@ static void ipath_vma_open(struct vm_area_struct *vma) struct ipath_mmap_info *ip = vma->vm_private_data; kref_get(&ip->ref); - ip->mmap_cnt++; } static void ipath_vma_close(struct vm_area_struct *vma) { struct ipath_mmap_info *ip = vma->vm_private_data; - ip->mmap_cnt--; 
kref_put(&ip->ref, ipath_release_mmap_info); } @@ -87,7 +90,7 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) struct ipath_ibdev *dev = to_idev(context->device); unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long size = vma->vm_end - vma->vm_start; - struct ipath_mmap_info *ip, **pp; + struct ipath_mmap_info *ip, *pp; int ret = -EINVAL; /* @@ -96,15 +99,16 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) * CQ, QP, or SRQ is soon followed by a call to mmap(). */ spin_lock_irq(&dev->pending_lock); - for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) { + list_for_each_entry_safe(ip, pp, &dev->pending_mmaps, + pending_mmaps) { /* Only the creator is allowed to mmap the object */ - if (context != ip->context || (void *) offset != ip->obj) + if (context != ip->context || (__u64) offset != ip->offset) continue; /* Don't allow a mmap larger than the object. */ if (size > ip->size) break; - *pp = ip->next; + list_del_init(&ip->pending_mmaps); spin_unlock_irq(&dev->pending_lock); ret = remap_vmalloc_range(vma, ip->obj, 0); @@ -119,3 +123,51 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) done: return ret; } + +/* + * Allocate information for ipath_mmap + */ +struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev, + u32 size, + struct ib_ucontext *context, + void *obj) { + struct ipath_mmap_info *ip; + + ip = kmalloc(sizeof *ip, GFP_KERNEL); + if (!ip) + goto bail; + + size = PAGE_ALIGN(size); + + spin_lock_irq(&dev->mmap_offset_lock); + if (dev->mmap_offset == 0) + dev->mmap_offset = PAGE_SIZE; + ip->offset = dev->mmap_offset; + dev->mmap_offset += size; + spin_unlock_irq(&dev->mmap_offset_lock); + + INIT_LIST_HEAD(&ip->pending_mmaps); + ip->size = size; + ip->context = context; + ip->obj = obj; + kref_init(&ip->ref); + +bail: + return ip; +} + +void ipath_update_mmap_info(struct ipath_ibdev *dev, + struct ipath_mmap_info *ip, + u32 size, void *obj) { + size = PAGE_ALIGN(size); + + spin_lock_irq(&dev->mmap_offset_lock); + if (dev->mmap_offset == 0) + dev->mmap_offset = PAGE_SIZE; + ip->offset = dev->mmap_offset; + dev->mmap_offset += size; + spin_unlock_irq(&dev->mmap_offset_lock); + + ip->size = size; + ip->obj = obj; +} diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c index 16db9ac..bfef08e 100644 --- a/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/drivers/infiniband/hw/ipath/ipath_qp.c @@ -844,34 +844,36 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, * See ipath_mmap() for details. */ if (udata && udata->outlen >= sizeof(__u64)) { - struct ipath_mmap_info *ip; - __u64 offset = (__u64) qp->r_rq.wq; int err; - err = ib_copy_to_udata(udata, &offset, sizeof(offset)); - if (err) { - ret = ERR_PTR(err); - goto bail_rwq; - } + if (!qp->r_rq.wq) { + __u64 offset = 0; - if (qp->r_rq.wq) { - /* Allocate info for ipath_mmap(). 
*/ - ip = kmalloc(sizeof(*ip), GFP_KERNEL); - if (!ip) { + err = ib_copy_to_udata(udata, &offset, + sizeof(offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_rwq; + } + } else { + u32 s = sizeof(struct ipath_rwq) + + qp->r_rq.size * sz; + + qp->ip = + ipath_create_mmap_info(dev, s, + ibpd->uobject->context, + qp->r_rq.wq); + if (!qp->ip) { ret = ERR_PTR(-ENOMEM); goto bail_rwq; } - qp->ip = ip; - ip->context = ibpd->uobject->context; - ip->obj = qp->r_rq.wq; - kref_init(&ip->ref); - ip->mmap_cnt = 0; - ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + - qp->r_rq.size * sz); - spin_lock_irq(&dev->pending_lock); - ip->next = dev->pending_mmaps; - dev->pending_mmaps = ip; - spin_unlock_irq(&dev->pending_lock); + + err = ib_copy_to_udata(udata, &(qp->ip->offset), + sizeof(qp->ip->offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_ip; + } } } @@ -885,6 +887,12 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, dev->n_qps_allocated++; spin_unlock(&dev->n_qps_lock); + if (qp->ip) { + spin_lock_irq(&dev->pending_lock); + list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + ret = &qp->ibqp; goto bail; diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index b4b88d0..1915771 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c @@ -98,13 +98,21 @@ static int ipath_make_rc_ack(struct ipath_qp *qp, case OP(RDMA_READ_RESPONSE_LAST): case OP(RDMA_READ_RESPONSE_ONLY): case OP(ATOMIC_ACKNOWLEDGE): - qp->s_ack_state = OP(ACKNOWLEDGE); + /* + * We can increment the tail pointer now that the last + * response has been sent instead of only being + * constructed. + */ + if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) + qp->s_tail_ack_queue = 0; /* FALLTHROUGH */ + case OP(SEND_ONLY): case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. */ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { if (qp->s_flags & IPATH_S_ACK_PENDING) goto normal; + qp->s_ack_state = OP(ACKNOWLEDGE); goto bail; } @@ -117,12 +125,8 @@ static int ipath_make_rc_ack(struct ipath_qp *qp, if (len > pmtu) { len = pmtu; qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); - } else { + } else qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); - if (++qp->s_tail_ack_queue > - IPATH_MAX_RDMA_ATOMIC) - qp->s_tail_ack_queue = 0; - } ohdr->u.aeth = ipath_compute_aeth(qp); hwords++; qp->s_ack_rdma_psn = e->psn; @@ -139,8 +143,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp, cpu_to_be32(e->atomic_data); hwords += sizeof(ohdr->u.at) / sizeof(u32); bth2 = e->psn; - if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) - qp->s_tail_ack_queue = 0; } bth0 = qp->s_ack_state << 24; break; @@ -156,8 +158,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp, ohdr->u.aeth = ipath_compute_aeth(qp); hwords++; qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); - if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) - qp->s_tail_ack_queue = 0; } bth0 = qp->s_ack_state << 24; bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK; @@ -171,7 +171,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp, * the ACK before setting s_ack_state to ACKNOWLEDGE * (see above). */ - qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); + qp->s_ack_state = OP(SEND_ONLY); qp->s_flags &= ~IPATH_S_ACK_PENDING; qp->s_cur_sge = NULL; if (qp->s_nak_state) @@ -223,23 +223,18 @@ int ipath_make_rc_req(struct ipath_qp *qp, /* Sending responses has higher priority over sending requests. 
*/ if ((qp->r_head_ack_queue != qp->s_tail_ack_queue || (qp->s_flags & IPATH_S_ACK_PENDING) || - qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) && + qp->s_ack_state != OP(ACKNOWLEDGE)) && ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p)) goto done; if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) || - qp->s_rnr_timeout) + qp->s_rnr_timeout || qp->s_wait_credit) goto bail; /* Limit the number of packets sent without an ACK. */ if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) { qp->s_wait_credit = 1; dev->n_rc_stalls++; - spin_lock(&dev->pending_lock); - if (list_empty(&qp->timerwait)) - list_add_tail(&qp->timerwait, - &dev->pending[dev->pending_index]); - spin_unlock(&dev->pending_lock); goto bail; } @@ -587,9 +582,12 @@ static void send_rc_ack(struct ipath_qp *qp) u32 hwords; struct ipath_ib_header hdr; struct ipath_other_headers *ohdr; + unsigned long flags; /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ - if (qp->r_head_ack_queue != qp->s_tail_ack_queue) + if (qp->r_head_ack_queue != qp->s_tail_ack_queue || + (qp->s_flags & IPATH_S_ACK_PENDING) || + qp->s_ack_state != OP(ACKNOWLEDGE)) goto queue_ack; /* Construct the header. */ @@ -640,11 +638,11 @@ static void send_rc_ack(struct ipath_qp *qp) dev->n_rc_qacks++; queue_ack: - spin_lock_irq(&qp->s_lock); + spin_lock_irqsave(&qp->s_lock, flags); qp->s_flags |= IPATH_S_ACK_PENDING; qp->s_nak_state = qp->r_nak_state; qp->s_ack_psn = qp->r_ack_psn; - spin_unlock_irq(&qp->s_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); /* Call ipath_do_rc_send() in another thread. */ tasklet_hi_schedule(&qp->s_task); @@ -1261,6 +1259,7 @@ ack_err: wc.dlid_path_bits = 0; wc.port_num = 0; ipath_sqerror_qp(qp, &wc); + spin_unlock_irqrestore(&qp->s_lock, flags); bail: return; } @@ -1294,6 +1293,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, struct ipath_ack_entry *e; u8 i, prev; int old_req; + unsigned long flags; if (diff > 0) { /* @@ -1327,7 +1327,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, psn &= IPATH_PSN_MASK; e = NULL; old_req = 1; - spin_lock_irq(&qp->s_lock); + spin_lock_irqsave(&qp->s_lock, flags); for (i = qp->r_head_ack_queue; ; i = prev) { if (i == qp->s_tail_ack_queue) old_req = 0; @@ -1425,7 +1425,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, * after all the previous RDMA reads and atomics. */ if (i == qp->r_head_ack_queue) { - spin_unlock_irq(&qp->s_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); qp->r_nak_state = 0; qp->r_ack_psn = qp->r_psn - 1; goto send_ack; @@ -1439,11 +1439,10 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, break; } qp->r_nak_state = 0; - spin_unlock_irq(&qp->s_lock); tasklet_hi_schedule(&qp->s_task); unlock_done: - spin_unlock_irq(&qp->s_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); done: return 1; @@ -1453,10 +1452,12 @@ send_ack: static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) { - spin_lock_irq(&qp->s_lock); + unsigned long flags; + + spin_lock_irqsave(&qp->s_lock, flags); qp->state = IB_QPS_ERR; ipath_error_qp(qp, err); - spin_unlock_irq(&qp->s_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); } /** diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c index 9403350..03acae6 100644 --- a/drivers/infiniband/hw/ipath/ipath_srq.c +++ b/drivers/infiniband/hw/ipath/ipath_srq.c @@ -139,33 +139,24 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, * See ipath_mmap() for details. 
*/ if (udata && udata->outlen >= sizeof(__u64)) { - struct ipath_mmap_info *ip; - __u64 offset = (__u64) srq->rq.wq; int err; + u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz; - err = ib_copy_to_udata(udata, &offset, sizeof(offset)); - if (err) { - ret = ERR_PTR(err); + srq->ip = + ipath_create_mmap_info(dev, s, + ibpd->uobject->context, + srq->rq.wq); + if (!srq->ip) { + ret = ERR_PTR(-ENOMEM); goto bail_wq; } - /* Allocate info for ipath_mmap(). */ - ip = kmalloc(sizeof(*ip), GFP_KERNEL); - if (!ip) { - ret = ERR_PTR(-ENOMEM); - goto bail_wq; + err = ib_copy_to_udata(udata, &srq->ip->offset, + sizeof(srq->ip->offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_ip; } - srq->ip = ip; - ip->context = ibpd->uobject->context; - ip->obj = srq->rq.wq; - kref_init(&ip->ref); - ip->mmap_cnt = 0; - ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + - srq->rq.size * sz); - spin_lock_irq(&dev->pending_lock); - ip->next = dev->pending_mmaps; - dev->pending_mmaps = ip; - spin_unlock_irq(&dev->pending_lock); } else srq->ip = NULL; @@ -181,21 +172,27 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, if (dev->n_srqs_allocated == ib_ipath_max_srqs) { spin_unlock(&dev->n_srqs_lock); ret = ERR_PTR(-ENOMEM); - goto bail_wq; + goto bail_ip; } dev->n_srqs_allocated++; spin_unlock(&dev->n_srqs_lock); + if (srq->ip) { + spin_lock_irq(&dev->pending_lock); + list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + ret = &srq->ibsrq; goto done; +bail_ip: + kfree(srq->ip); bail_wq: vfree(srq->rq.wq); - bail_srq: kfree(srq); - done: return ret; } @@ -312,13 +309,13 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, if (srq->ip) { struct ipath_mmap_info *ip = srq->ip; struct ipath_ibdev *dev = to_idev(srq->ibsrq.device); + u32 s = sizeof(struct ipath_rwq) + size * sz; - ip->obj = wq; - ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + - size * sz); + ipath_update_mmap_info(dev, ip, s, wq); spin_lock_irq(&dev->pending_lock); - ip->next = dev->pending_mmaps; - dev->pending_mmaps = ip; + if (list_empty(&ip->pending_mmaps)) + list_add(&ip->pending_mmaps, + &dev->pending_mmaps); spin_unlock_irq(&dev->pending_lock); } } else if (attr_mask & IB_SRQ_LIMIT) { diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c index 9307f71..d8b5e4c 100644 --- a/drivers/infiniband/hw/ipath/ipath_stats.c +++ b/drivers/infiniband/hw/ipath/ipath_stats.c @@ -31,8 +31,6 @@ * SOFTWARE. 
*/ -#include <linux/pci.h> - #include "ipath_kernel.h" struct infinipath_stats ipath_stats; diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c index ffa6318..4dc398d 100644 --- a/drivers/infiniband/hw/ipath/ipath_sysfs.c +++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c @@ -32,7 +32,6 @@ */ #include <linux/ctype.h> -#include <linux/pci.h> #include "ipath_kernel.h" #include "ipath_common.h" diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 18c6df2..12933e7 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1476,7 +1476,10 @@ int ipath_register_ib_device(struct ipath_devdata *dd) ret = -ENOMEM; goto err_lk; } + INIT_LIST_HEAD(&idev->pending_mmaps); spin_lock_init(&idev->pending_lock); + idev->mmap_offset = PAGE_SIZE; + spin_lock_init(&idev->mmap_offset_lock); INIT_LIST_HEAD(&idev->pending[0]); INIT_LIST_HEAD(&idev->pending[1]); INIT_LIST_HEAD(&idev->pending[2]); @@ -1558,6 +1561,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd) (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); dev->node_type = RDMA_NODE_IB_CA; dev->phys_port_cnt = 1; + dev->num_comp_vectors = 1; dev->dma_device = &dd->pcidev->dev; dev->query_device = ipath_query_device; dev->modify_device = ipath_modify_device; diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index 7c4929f..7064fc2 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h @@ -173,12 +173,12 @@ struct ipath_ah { * this as its vm_private_data. */ struct ipath_mmap_info { - struct ipath_mmap_info *next; + struct list_head pending_mmaps; struct ib_ucontext *context; void *obj; + __u64 offset; struct kref ref; unsigned size; - unsigned mmap_cnt; }; /* @@ -422,7 +422,7 @@ struct ipath_qp { #define IPATH_S_RDMAR_PENDING 0x04 #define IPATH_S_ACK_PENDING 0x08 -#define IPATH_PSN_CREDIT 2048 +#define IPATH_PSN_CREDIT 512 /* * Since struct ipath_swqe is not a fixed size, we can't simply index into @@ -485,9 +485,10 @@ struct ipath_opcode_stats { struct ipath_ibdev { struct ib_device ibdev; - struct list_head dev_list; struct ipath_devdata *dd; - struct ipath_mmap_info *pending_mmaps; + struct list_head pending_mmaps; + spinlock_t mmap_offset_lock; + u32 mmap_offset; int ib_unit; /* This is the device number */ u16 sm_lid; /* in host order */ u8 sm_sl; @@ -734,13 +735,13 @@ int ipath_destroy_srq(struct ib_srq *ibsrq); int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); -struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, +struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata); int ipath_destroy_cq(struct ib_cq *ibcq); -int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify); +int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); @@ -768,6 +769,15 @@ int ipath_dealloc_fmr(struct ib_fmr *ibfmr); void ipath_release_mmap_info(struct kref *ref); +struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev, + u32 size, + struct ib_ucontext *context, + void *obj); + +void ipath_update_mmap_info(struct ipath_ibdev *dev, + struct ipath_mmap_info *ip, + u32 size, void *obj); + int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); void ipath_no_bufs_available(struct 
ipath_qp *qp, struct ipath_ibdev *dev); diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index efd79ef..cf0868f 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -726,11 +726,12 @@ repoll: return err == 0 || err == -EAGAIN ? npolled : err; } -int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) +int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags) { __be32 doorbell[2]; - doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ? + doorbell[0] = cpu_to_be32(((flags & IB_CQ_SOLICITED_MASK) == + IB_CQ_SOLICITED ? MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : MTHCA_TAVOR_CQ_DB_REQ_NOT) | to_mcq(cq)->cqn); @@ -743,7 +744,7 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) return 0; } -int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct mthca_cq *cq = to_mcq(ibcq); __be32 doorbell[2]; @@ -755,7 +756,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) doorbell[0] = ci; doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) | - (notify == IB_CQ_SOLICITED ? 1 : 2)); + ((flags & IB_CQ_SOLICITED_MASK) == + IB_CQ_SOLICITED ? 1 : 2)); mthca_write_db_rec(doorbell, cq->arm_db); @@ -766,7 +768,7 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) wmb(); doorbell[0] = cpu_to_be32((sn << 28) | - (notify == IB_CQ_SOLICITED ? + ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL : MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn); diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index b7e42ef..9bae3cc 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -495,8 +495,8 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev); int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); -int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify); -int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify); +int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); +int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); int mthca_init_cq(struct mthca_dev *dev, int nent, struct mthca_ucontext *ctx, u32 pdn, struct mthca_cq *cq); diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h index 5941441..a1ab068 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.h +++ b/drivers/infiniband/hw/mthca/mthca_memfree.h @@ -38,7 +38,6 @@ #define MTHCA_MEMFREE_H #include <linux/list.h> -#include <linux/pci.h> #include <linux/mutex.h> #define MTHCA_ICM_CHUNK_LEN \ diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 47e6fd46d..1c05486 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -663,6 +663,7 @@ static int mthca_destroy_qp(struct ib_qp *qp) } static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, + int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) { @@ -1292,6 +1293,7 @@ int mthca_register_device(struct mthca_dev *dev) (1ull << IB_USER_VERBS_CMD_DETACH_MCAST); dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; + dev->ib_dev.num_comp_vectors = 1; dev->ib_dev.dma_device = &dev->pdev->dev; dev->ib_dev.query_device = mthca_query_device; 
dev->ib_dev.query_port = mthca_query_port; diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 8fe6fee..fee60c8 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -701,6 +701,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); } + if (ibqp->qp_type == IB_QPT_RC && + cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { + u8 sched_queue = ibqp->uobject ? 0x2 : 0x1; + + if (mthca_is_memfree(dev)) + qp_context->rlkey_arbel_sched_queue |= sched_queue; + else + qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue); + + qp_param->opt_param_mask |= + cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE); + } + if (attr_mask & IB_QP_TIMEOUT) { qp_context->pri_path.ackto = attr->timeout << 3; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index fd55826..87310ee 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -41,7 +41,6 @@ #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/workqueue.h> -#include <linux/pci.h> #include <linux/kref.h> #include <linux/if_infiniband.h> #include <linux/mutex.h> @@ -311,6 +310,7 @@ extern struct workqueue_struct *ipoib_workqueue; /* functions */ +int ipoib_poll(struct net_device *dev, int *budget); void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr); struct ipoib_ah *ipoib_create_ah(struct net_device *dev, diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 0c4e59b..785bc85 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -370,7 +370,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) { p = wc->qp->qp_context; - if (time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) { + if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) { spin_lock_irqsave(&priv->lock, flags); p->jiffies = jiffies; /* Move this entry to list head, but do @@ -416,7 +416,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) skb->dev = dev; /* XXX get correct PACKET_ type here */ skb->pkt_type = PACKET_HOST; - netif_rx_ni(skb); + netif_receive_skb(skb); repost: if (unlikely(ipoib_cm_post_receive(dev, wr_id))) @@ -592,7 +592,9 @@ int ipoib_cm_dev_open(struct net_device *dev) priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev); if (IS_ERR(priv->cm.id)) { printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name); - return IS_ERR(priv->cm.id); + ret = PTR_ERR(priv->cm.id); + priv->cm.id = NULL; + return ret; } ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), @@ -601,6 +603,7 @@ int ipoib_cm_dev_open(struct net_device *dev) printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name, IPOIB_CM_IETF_ID | priv->qp->qp_num); ib_destroy_cm_id(priv->cm.id); + priv->cm.id = NULL; return ret; } return 0; @@ -611,10 +614,11 @@ void ipoib_cm_dev_stop(struct net_device *dev) struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_rx *p; - if (!IPOIB_CM_SUPPORTED(dev->dev_addr)) + if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id) return; ib_destroy_cm_id(priv->cm.id); + priv->cm.id = NULL; spin_lock_irq(&priv->lock); while 
(!list_empty(&priv->cm.passive_ids)) { p = list_entry(priv->cm.passive_ids.next, typeof(*p), list); @@ -789,7 +793,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, } p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p, - ipoib_sendq_size + 1); + ipoib_sendq_size + 1, 0); if (IS_ERR(p->cq)) { ret = PTR_ERR(p->cq); ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 1bdb910..68d72c6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -226,7 +226,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) skb->dev = dev; /* XXX get correct PACKET_ type here */ skb->pkt_type = PACKET_HOST; - netif_rx_ni(skb); + netif_receive_skb(skb); } else { ipoib_dbg_data(priv, "dropping loopback packet\n"); dev_kfree_skb_any(skb); @@ -280,28 +280,63 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) wc->status, wr_id, wc->vendor_err); } -static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc) +int ipoib_poll(struct net_device *dev, int *budget) { - if (wc->wr_id & IPOIB_CM_OP_SRQ) - ipoib_cm_handle_rx_wc(dev, wc); - else if (wc->wr_id & IPOIB_OP_RECV) - ipoib_ib_handle_rx_wc(dev, wc); - else - ipoib_ib_handle_tx_wc(dev, wc); + struct ipoib_dev_priv *priv = netdev_priv(dev); + int max = min(*budget, dev->quota); + int done; + int t; + int empty; + int n, i; + + done = 0; + empty = 0; + + while (max) { + t = min(IPOIB_NUM_WC, max); + n = ib_poll_cq(priv->cq, t, priv->ibwc); + + for (i = 0; i < n; ++i) { + struct ib_wc *wc = priv->ibwc + i; + + if (wc->wr_id & IPOIB_CM_OP_SRQ) { + ++done; + --max; + ipoib_cm_handle_rx_wc(dev, wc); + } else if (wc->wr_id & IPOIB_OP_RECV) { + ++done; + --max; + ipoib_ib_handle_rx_wc(dev, wc); + } else + ipoib_ib_handle_tx_wc(dev, wc); + } + + if (n != t) { + empty = 1; + break; + } + } + + dev->quota -= done; + *budget -= done; + + if (empty) { + netif_rx_complete(dev); + if (unlikely(ib_req_notify_cq(priv->cq, + IB_CQ_NEXT_COMP | + IB_CQ_REPORT_MISSED_EVENTS)) && + netif_rx_reschedule(dev, 0)) + return 1; + + return 0; + } + + return 1; } void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) { - struct net_device *dev = (struct net_device *) dev_ptr; - struct ipoib_dev_priv *priv = netdev_priv(dev); - int n, i; - - ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); - do { - n = ib_poll_cq(cq, IPOIB_NUM_WC, priv->ibwc); - for (i = 0; i < n; ++i) - ipoib_ib_handle_wc(dev, priv->ibwc + i); - } while (n == IPOIB_NUM_WC); + netif_rx_schedule(dev_ptr); } static inline int post_send(struct ipoib_dev_priv *priv, @@ -514,9 +549,10 @@ int ipoib_ib_dev_stop(struct net_device *dev) struct ib_qp_attr qp_attr; unsigned long begin; struct ipoib_tx_buf *tx_req; - int i; + int i, n; clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); + netif_poll_disable(dev); ipoib_cm_dev_stop(dev); @@ -568,6 +604,18 @@ int ipoib_ib_dev_stop(struct net_device *dev) goto timeout; } + do { + n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc); + for (i = 0; i < n; ++i) { + if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ) + ipoib_cm_handle_rx_wc(dev, priv->ibwc + i); + else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) + ipoib_ib_handle_rx_wc(dev, priv->ibwc + i); + else + ipoib_ib_handle_tx_wc(dev, priv->ibwc + i); + } + } while (n == IPOIB_NUM_WC); + msleep(1); } @@ -596,6 +644,9 @@ timeout: msleep(1); } + netif_poll_enable(dev); + ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP); + return 0; 
} diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index b4c380c..0a428f2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -948,6 +948,8 @@ static void ipoib_setup(struct net_device *dev) dev->hard_header = ipoib_hard_header; dev->set_multicast_list = ipoib_set_mcast_list; dev->neigh_setup = ipoib_neigh_setup_dev; + dev->poll = ipoib_poll; + dev->weight = 100; dev->watchdog_timeo = HZ; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 7f3ec20..5c3c6a4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -187,7 +187,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) if (!ret) size += ipoib_recvq_size; - priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size); + priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0); if (IS_ERR(priv->cq)) { printk(KERN_WARNING "%s: failed to create CQ\n", ca->name); goto out_free_mr; diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 278fcbc..3651072 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -201,7 +201,7 @@ static int iser_post_receive_control(struct iscsi_conn *conn) * what's common for both schemes is that the connection is not started */ if (conn->c_stage != ISCSI_CONN_STARTED) - rx_data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; + rx_data_size = ISCSI_DEF_MAX_RECV_SEG_LEN; else /* FIXME till user space sets conn->max_recv_dlength correctly */ rx_data_size = 128; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 1fc9674..89d6008 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -76,7 +76,7 @@ static int iser_create_device_ib_res(struct iser_device *device) iser_cq_callback, iser_cq_event_callback, (void *)device, - ISER_MAX_CQ_LEN); + ISER_MAX_CQ_LEN, 0); if (IS_ERR(device->cq)) goto cq_err; diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 5e8ac57..39bf057 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -197,7 +197,7 @@ static int srp_create_target_ib(struct srp_target_port *target) return -ENOMEM; target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion, - NULL, target, SRP_CQ_SIZE); + NULL, target, SRP_CQ_SIZE, 0); if (IS_ERR(target->cq)) { ret = PTR_ERR(target->cq); goto out; @@ -1468,6 +1468,25 @@ static ssize_t show_dgid(struct class_device *cdev, char *buf) be16_to_cpu(((__be16 *) target->path.dgid.raw)[7])); } +static ssize_t show_orig_dgid(struct class_device *cdev, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(cdev)); + + if (target->state == SRP_TARGET_DEAD || + target->state == SRP_TARGET_REMOVED) + return -ENODEV; + + return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", + be16_to_cpu(target->orig_dgid[0]), + be16_to_cpu(target->orig_dgid[1]), + be16_to_cpu(target->orig_dgid[2]), + be16_to_cpu(target->orig_dgid[3]), + be16_to_cpu(target->orig_dgid[4]), + be16_to_cpu(target->orig_dgid[5]), + be16_to_cpu(target->orig_dgid[6]), + be16_to_cpu(target->orig_dgid[7])); +} + static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf) { struct srp_target_port *target = 
host_to_target(class_to_shost(cdev)); @@ -1498,6 +1517,7 @@ static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); +static CLASS_DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); static CLASS_DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); @@ -1508,6 +1528,7 @@ static struct class_device_attribute *srp_host_attrs[] = { &class_device_attr_service_id, &class_device_attr_pkey, &class_device_attr_dgid, + &class_device_attr_orig_dgid, &class_device_attr_zero_req_lim, &class_device_attr_local_ib_port, &class_device_attr_local_ib_device, @@ -1516,7 +1537,8 @@ static struct class_device_attribute *srp_host_attrs[] = { static struct scsi_host_template srp_template = { .module = THIS_MODULE, - .name = DRV_NAME, + .name = "InfiniBand SRP initiator", + .proc_name = DRV_NAME, .info = srp_target_info, .queuecommand = srp_queuecommand, .eh_abort_handler = srp_abort, @@ -1662,6 +1684,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16); } kfree(p); + memcpy(target->orig_dgid, target->path.dgid.raw, 16); break; case SRP_OPT_PKEY: diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 2f3319c..1d53c7b 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -129,6 +129,7 @@ struct srp_target_port { unsigned int scsi_id; struct ib_sa_path_rec path; + __be16 orig_dgid[8]; struct ib_sa_query *path_query; int path_query_id;
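Beyond the new CQ-creation signature shown above, the other cross-driver change in this diff replaces enum ib_cq_notify with ib_cq_notify_flags: drivers mask the request with IB_CQ_SOLICITED_MASK to pick the arm mode, and when IB_CQ_REPORT_MISSED_EVENTS is set they return a positive value if completions may already be queued behind the re-arm. The consumer-side idiom this enables, sketched with a hypothetical poll_and_process() helper that drains one batch of completions:

static void example_drain_and_rearm(struct ib_cq *cq)
{
	/*
	 * A positive return with IB_CQ_REPORT_MISSED_EVENTS set means a
	 * completion may have slipped in between the last poll and the
	 * re-arm, so poll again instead of waiting for an interrupt
	 * that will never fire.
	 */
	do {
		while (poll_and_process(cq) > 0)	/* hypothetical */
			;
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}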
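The fmr_pool hunks tag every printk with a PFX macro so messages identify their source; adjacent string literals are concatenated at compile time, so the prefix costs nothing at run time. The idiom, reduced to one message:

#define PFX "fmr_pool: "

static void example_log_unmap_failure(int ret)
{
	printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);
}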
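In the cxgb3 changes, the hardware delivers each abort notification twice, so abort_rpl() and peer_abort() record the first copy in ep->flags (via the new enum iwch_ep_flags) and only tear the endpoint down on the second. The idiom, distilled; teardown_endpoint() is a hypothetical stand-in for the release path:

static int example_abort_handler(struct iwch_ep *ep)
{
	/* first copy: just note that one more is coming */
	if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
		ep->flags |= PEER_ABORT_IN_PROGRESS;
		return CPL_RET_BUF_DONE;
	}

	/* second copy: now it is safe to release the endpoint */
	return teardown_endpoint(ep);		/* hypothetical */
}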
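ehca implements the missed-event report with the new ipz_qeit_is_valid(): a queue entry is live when the toggle bit in its flags matches the queue's toggle_state, which flips each time the circular queue wraps, so stale entries from the previous pass never test valid. The check in generic form (field widths and names assumed):

static int example_entry_is_valid(u32 entry_flags, u32 toggle_state)
{
	/* bit 7 of the flags records the producer's toggle at write time */
	return ((entry_flags >> 7) & 1) == (toggle_state & 1);
}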
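The ipath mmap rework stops handing userspace raw kernel addresses to use as mmap offsets. Each CQ, QP, or SRQ buffer instead gets a unique page-aligned token from a per-device counter, and ipath_mmap() matches requests against the pending_mmaps list by that token. The allocator at the core of ipath_create_mmap_info() and ipath_update_mmap_info(), shown in isolation:

static u32 example_alloc_mmap_offset(struct ipath_ibdev *dev, u32 size)
{
	u32 offset;

	size = PAGE_ALIGN(size);

	spin_lock_irq(&dev->mmap_offset_lock);
	/* offset 0 is never handed out, so it can mean "nothing to map" */
	if (dev->mmap_offset == 0)
		dev->mmap_offset = PAGE_SIZE;
	offset = dev->mmap_offset;
	dev->mmap_offset += size;
	spin_unlock_irq(&dev->mmap_offset_lock);

	return offset;
}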
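Several ipath_rc.c hunks convert spin_lock_irq() to spin_lock_irqsave(): these paths can be reached from contexts where interrupts are already disabled, and a plain spin_unlock_irq() would re-enable them unconditionally. The safe pattern:

static void example_update_ack_state(struct ipath_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	/* ... modify ACK queue state under the lock ... */
	spin_unlock_irqrestore(&qp->s_lock, flags);
}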
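The IPoIB changes move receive processing from netif_rx_ni() in the completion handler to the 2.6.21-era NAPI interface (dev->poll and dev->weight; struct napi_struct did not exist yet), with the completion handler reduced to netif_rx_schedule(). A stripped-down skeleton of that polling contract; do_rx_work() and rearm_reports_missed() are hypothetical stand-ins for the CQ polling loop and the IB_CQ_REPORT_MISSED_EVENTS re-arm:

static int example_poll(struct net_device *dev, int *budget)
{
	int max = min(*budget, dev->quota);
	int done = do_rx_work(dev, max);	/* hypothetical */

	dev->quota -= done;
	*budget -= done;

	if (done < max) {
		/* ring drained: leave polling mode... */
		netif_rx_complete(dev);
		/* ...unless the re-arm says completions slipped in */
		if (rearm_reports_missed(dev) &&	/* hypothetical */
		    netif_rx_reschedule(dev, 0))
			return 1;
		return 0;		/* done; back to interrupt mode */
	}

	return 1;			/* more work: stay on the poll list */
}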
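Finally, the SRP additions expose the originally configured destination GID through the old class_device sysfs interface: a show function, a CLASS_DEVICE_ATTR() definition, and an entry in srp_host_attrs[]. The three-part pattern pared down to a single attribute (name and printed value here are placeholders):

static ssize_t show_example(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	return sprintf(buf, "0x%x\n", target->scsi_id);
}

static CLASS_DEVICE_ATTR(example, S_IRUGO, show_example, NULL);

/* ...and add &class_device_attr_example to the srp_host_attrs[] table */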