IB/qib: Add fix missing from earlier patch
author    Mike Marciniszyn <mike.marciniszyn@qlogic.com>    Tue, 11 Jan 2011 01:42:21 +0000 (17:42 -0800)
committer Roland Dreier <rolandd@cisco.com>    Tue, 11 Jan 2011 01:42:21 +0000 (17:42 -0800)
The upstream code was missing part of a receive/error race fix from
the internal tree.  Add the missing part, which makes future merges
possible.

Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
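
The diff below replaces each inline "bump ibp->n_pkt_drops and jump to the
empty bail label" sequence with a jump to a single shared drop label that
does the counting in one place. A minimal standalone sketch of that pattern
(hypothetical simplified types; the real struct qib_ibport lives in the qib
driver headers):

	/* Hypothetical stand-in for struct qib_ibport; only the
	 * drop counter matters for this sketch. */
	struct ibport_sketch {
		unsigned long n_pkt_drops;
	};

	static void ud_rcv_sketch(struct ibport_sketch *ibp, unsigned int tlen,
				  unsigned int hdrsize, unsigned int pad)
	{
		/* Drop incomplete packets, as in the first hunk below. */
		if (tlen < hdrsize + pad + 4)
			goto drop;

		/* ... remaining validation and receive processing ... */
		return;

	drop:
		/* Every drop path shares this one counter update. */
		ibp->n_pkt_drops++;
	}

Note that the Q_Key and P_Key violation paths become a plain return rather
than "goto drop", so they stay out of the n_pkt_drops count; those events
are already reported by the (truncated) calls visible in the surrounding
context of the third and fourth hunks.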
drivers/infiniband/hw/qib/qib_ud.c

index a4b945d9a303bc0aa7569ef21c28b26490f4b80e..4a51fd1e9cb7de7e9aa1723e91fc100d819ba52f 100644
@@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
        qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
        src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
 
-       /* Get the number of bytes the message was padded by. */
+       /*
+        * Get the number of bytes the message was padded by
+        * and drop incomplete packets.
+        */
        pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-       if (unlikely(tlen < (hdrsize + pad + 4))) {
-               /* Drop incomplete packets. */
-               ibp->n_pkt_drops++;
-               goto bail;
-       }
+       if (unlikely(tlen < (hdrsize + pad + 4)))
+               goto drop;
+
        tlen -= hdrsize + pad + 4;
 
        /*
@@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
         */
        if (qp->ibqp.qp_num) {
                if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
-                            hdr->lrh[3] == IB_LID_PERMISSIVE)) {
-                       ibp->n_pkt_drops++;
-                       goto bail;
-               }
+                            hdr->lrh[3] == IB_LID_PERMISSIVE))
+                       goto drop;
                if (qp->ibqp.qp_num > 1) {
                        u16 pkey1, pkey2;
 
@@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                                                0xF,
                                              src_qp, qp->ibqp.qp_num,
                                              hdr->lrh[3], hdr->lrh[1]);
-                               goto bail;
+                               return;
                        }
                }
                if (unlikely(qkey != qp->qkey)) {
@@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                                      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
                                      src_qp, qp->ibqp.qp_num,
                                      hdr->lrh[3], hdr->lrh[1]);
-                       goto bail;
+                       return;
                }
                /* Drop invalid MAD packets (see 13.5.3.1). */
                if (unlikely(qp->ibqp.qp_num == 1 &&
                             (tlen != 256 ||
-                             (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
-                       ibp->n_pkt_drops++;
-                       goto bail;
-               }
+                             (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
+                       goto drop;
        } else {
                struct ib_smp *smp;
 
                /* Drop invalid MAD packets (see 13.5.3.1). */
-               if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
-                       ibp->n_pkt_drops++;
-                       goto bail;
-               }
+               if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
+                       goto drop;
                smp = (struct ib_smp *) data;
                if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
                     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
-                   smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-                       ibp->n_pkt_drops++;
-                       goto bail;
-               }
+                   smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+                       goto drop;
        }
 
        /*
@@ -523,10 +516,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
        } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
                wc.ex.imm_data = 0;
                wc.wc_flags = 0;
-       } else {
-               ibp->n_pkt_drops++;
-               goto bail;
-       }
+       } else
+               goto drop;
 
        /*
         * A GRH is expected to preceed the data even if not
@@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
        /* Silently drop packets which are too big. */
        if (unlikely(wc.byte_len > qp->r_len)) {
                qp->r_flags |= QIB_R_REUSE_SGE;
-               ibp->n_pkt_drops++;
-               return;
+               goto drop;
        }
        if (has_grh) {
                qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                     (ohdr->bth[0] &
                        cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail:;
+       return;
+
+drop:
+       ibp->n_pkt_drops++;
 }