From a3285aa4eecd722508dab01c4932b11b4ba80134 Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Tue, 9 May 2006 10:50:29 -0700
Subject: IB/mthca: Fix race in reference counting

Fix races in destroying various objects.  If a destroy routine waits
for an object to become free by doing

	wait_event(obj->wait, !atomic_read(&obj->refcount));
	/* now clean up and destroy the object */

and another place drops a reference to the object by doing

	if (atomic_dec_and_test(&obj->refcount))
		wake_up(&obj->wait);

then this is susceptible to a race where the wait_event() and final
freeing of the object occur between the atomic_dec_and_test() and the
wake_up().  This is a use-after-free, since wake_up() will be called
on part of the already-freed object.

Fix this in mthca by replacing the atomic_t refcounts with plain old
integers protected by a spinlock.  This makes it possible to do the
decrement of the reference count and the wake_up() so that they appear
as a single atomic operation to the code waiting on the wait queue.

While touching this code, also simplify mthca_cq_clean(): the CQ being
cleaned cannot go away, because it still has a QP attached to it.  So
there's no reason to be paranoid and look up the CQ by number; it's
perfectly safe to use the pointer that the callers already have.

Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/mthca/mthca_srq.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

(limited to 'drivers/infiniband/hw/mthca/mthca_srq.c')

diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index adcaf85..1ea4332 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 		goto err_out_mailbox;
 
 	spin_lock_init(&srq->lock);
-	atomic_set(&srq->refcount, 1);
+	srq->refcount = 1;
 	init_waitqueue_head(&srq->wait);
 
 	if (mthca_is_memfree(dev))
@@ -308,6 +308,17 @@ err_out:
 	return err;
 }
 
+static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+	int c;
+
+	spin_lock_irq(&dev->srq_table.lock);
+	c = srq->refcount;
+	spin_unlock_irq(&dev->srq_table.lock);
+
+	return c;
+}
+
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 {
 	struct mthca_mailbox *mailbox;
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 	spin_lock_irq(&dev->srq_table.lock);
 	mthca_array_clear(&dev->srq_table.srq,
 			  srq->srqn & (dev->limits.num_srqs - 1));
+	--srq->refcount;
 	spin_unlock_irq(&dev->srq_table.lock);
 
-	atomic_dec(&srq->refcount);
-	wait_event(srq->wait, !atomic_read(&srq->refcount));
+	wait_event(srq->wait, !get_srq_refcount(dev, srq));
 
 	if (!srq->ibsrq.uobject) {
 		mthca_free_srq_buf(dev, srq);
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 	spin_lock(&dev->srq_table.lock);
 	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
 	if (srq)
-		atomic_inc(&srq->refcount);
+		++srq->refcount;
 	spin_unlock(&dev->srq_table.lock);
 
 	if (!srq) {
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
 
 out:
-	if (atomic_dec_and_test(&srq->refcount))
+	spin_lock(&dev->srq_table.lock);
+	if (!--srq->refcount)
 		wake_up(&srq->wait);
+	spin_unlock(&dev->srq_table.lock);
 }
 
 /*
-- 
cgit v1.1
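
For reference, the locking pattern the patch adopts can be shown in
isolation by the minimal sketch below.  It is not mthca code: struct obj,
obj_create(), obj_refcount(), obj_put() and obj_destroy() are hypothetical
names, and the lock is embedded in the object only to keep the sketch
self-contained, whereas the driver keeps the SRQ count under the shared
dev->srq_table.lock used by the lookup path.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct obj {
	spinlock_t		lock;		/* in mthca: the table lock in the device struct */
	int			refcount;
	wait_queue_head_t	wait;
};

static struct obj *obj_create(void)
{
	struct obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	spin_lock_init(&obj->lock);
	obj->refcount = 1;			/* creation reference */
	init_waitqueue_head(&obj->wait);
	return obj;
}

/*
 * Read the count under the lock so the waiter's condition check is
 * serialized against obj_put().
 */
static int obj_refcount(struct obj *obj)
{
	int c;

	spin_lock_irq(&obj->lock);
	c = obj->refcount;
	spin_unlock_irq(&obj->lock);

	return c;
}

/*
 * Drop a reference.  The decrement and the wake_up() happen under the
 * lock, so to the waiter they look like a single atomic step.
 */
static void obj_put(struct obj *obj)
{
	spin_lock_irq(&obj->lock);
	if (!--obj->refcount)
		wake_up(&obj->wait);
	spin_unlock_irq(&obj->lock);
}

/*
 * Destroy path: drop the creation reference, wait for the remaining
 * references to go away, then free the object.
 */
static void obj_destroy(struct obj *obj)
{
	spin_lock_irq(&obj->lock);
	--obj->refcount;
	spin_unlock_irq(&obj->lock);

	wait_event(obj->wait, !obj_refcount(obj));
	kfree(obj);
}

Because obj_refcount() takes the same lock as obj_put(), the waiter in
obj_destroy() cannot observe a count of zero while obj_put() is still
between the decrement and the wake_up(); by the time the condition reads
zero, obj_put() has released the lock and is done touching the object,
which closes the use-after-free window the commit message describes.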