IB/qib: Optimize locking for get_txreq()
author Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Fri, 23 Dec 2011 13:03:41 +0000 (08:03 -0500)
committer Roland Dreier <roland@purestorage.com>
Wed, 4 Jan 2012 04:53:31 +0000 (20:53 -0800)
The current code locks the QP s_lock, followed by the pending_lock,
presumably to protect against the allocation failing.

This patch takes only the pending_lock, on the assumption that an
empty free list is the exception; in that case the pending_lock is
dropped and the original, fully locked path is executed.  This saves
an acquisition of s_lock in the normal case.

The observation is that the sdma descriptors deplete at twice the
rate of txreqs, so running out of txreqs should be rare.
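
In isolation, the pattern looks like the sketch below.  The names
(pool_dev, pool_txreq, alloc_fast, __alloc_slow) are hypothetical
stand-ins, not the actual qib code; see get_txreq()/__get_txreq() in
the diff for the real thing.  Also note the changed error convention:
failure now comes back as ERR_PTR(-EBUSY) rather than through an
out-parameter, so callers test with IS_ERR().

	#include <linux/err.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct pool_dev {
		spinlock_t pending_lock;
		struct list_head txreq_free;
	};

	struct pool_txreq {
		struct list_head list;
	};

	/* Slow path, kept out of line so the common path stays small. */
	static noinline struct pool_txreq *__alloc_slow(struct pool_dev *dev)
	{
		/*
		 * The real __get_txreq() re-takes qp->s_lock and the
		 * pending_lock, re-checks the list, and queues the QP on
		 * the wait list before giving up.  The sketch just
		 * reports the busy condition.
		 */
		return ERR_PTR(-EBUSY);
	}

	static inline struct pool_txreq *alloc_fast(struct pool_dev *dev)
	{
		struct pool_txreq *tx;
		unsigned long flags;

		spin_lock_irqsave(&dev->pending_lock, flags);
		if (likely(!list_empty(&dev->txreq_free))) {
			struct list_head *l = dev->txreq_free.next;

			list_del(l);
			spin_unlock_irqrestore(&dev->pending_lock, flags);
			tx = list_entry(l, struct pool_txreq, list);
		} else {
			/* Empty list is the exception: drop the lock and
			 * retry via the slow path. */
			spin_unlock_irqrestore(&dev->pending_lock, flags);
			tx = __alloc_slow(dev);
		}
		return tx;
	}

A caller then looks like:

	tx = alloc_fast(dev);
	if (IS_ERR(tx))
		return PTR_ERR(tx);	/* -EBUSY when the pool is empty */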

Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
drivers/infiniband/hw/qib/qib_verbs.c

index a894762da462c73b304e9e2abc314582d84cd6ab..7b6c3bffa9d9fa0cd1090b5498bb663d1cbc5b6f 100644 (file)
@@ -913,8 +913,8 @@ static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
                __raw_writel(last, piobuf);
 }
 
-static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
-                                        struct qib_qp *qp, int *retp)
+static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
+                                          struct qib_qp *qp)
 {
        struct qib_verbs_txreq *tx;
        unsigned long flags;
@@ -926,8 +926,9 @@ static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
                struct list_head *l = dev->txreq_free.next;
 
                list_del(l);
+               spin_unlock(&dev->pending_lock);
+               spin_unlock_irqrestore(&qp->s_lock, flags);
                tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
-               *retp = 0;
        } else {
                if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
                    list_empty(&qp->iowait)) {
@@ -935,14 +936,33 @@ static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
                        qp->s_flags |= QIB_S_WAIT_TX;
                        list_add_tail(&qp->iowait, &dev->txwait);
                }
-               tx = NULL;
                qp->s_flags &= ~QIB_S_BUSY;
-               *retp = -EBUSY;
+               spin_unlock(&dev->pending_lock);
+               spin_unlock_irqrestore(&qp->s_lock, flags);
+               tx = ERR_PTR(-EBUSY);
        }
+       return tx;
+}
 
-       spin_unlock(&dev->pending_lock);
-       spin_unlock_irqrestore(&qp->s_lock, flags);
+static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
+                                        struct qib_qp *qp)
+{
+       struct qib_verbs_txreq *tx;
+       unsigned long flags;
 
+       spin_lock_irqsave(&dev->pending_lock, flags);
+       /* assume the list is non-empty */
+       if (likely(!list_empty(&dev->txreq_free))) {
+               struct list_head *l = dev->txreq_free.next;
+
+               list_del(l);
+               spin_unlock_irqrestore(&dev->pending_lock, flags);
+               tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+       } else {
+               /* call slow path to get the extra lock */
+               spin_unlock_irqrestore(&dev->pending_lock, flags);
+               tx = __get_txreq(dev, qp);
+       }
        return tx;
 }
 
@@ -1122,9 +1142,9 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
                goto bail;
        }
 
-       tx = get_txreq(dev, qp, &ret);
-       if (!tx)
-               goto bail;
+       tx = get_txreq(dev, qp);
+       if (IS_ERR(tx))
+               goto bail_tx;
 
        control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
                                       be16_to_cpu(hdr->lrh[0]) >> 12);
@@ -1195,6 +1215,9 @@ unaligned:
        ibp->n_unaligned++;
 bail:
        return ret;
+bail_tx:
+       ret = PTR_ERR(tx);
+       goto bail;
 }
 
 /*