qed: Allow chance for fast ramrod completions
author     Yuval Mintz <Yuval.Mintz@caviumnetworks.com>
           Fri, 14 Oct 2016 09:19:21 +0000 (05:19 -0400)
committer  David S. Miller <davem@davemloft.net>
           Fri, 14 Oct 2016 15:59:58 +0000 (11:59 -0400)
Whenever a ramrod is sent for some device configuration, the
driver sleeps at least 5ms between each iteration of polling
for the ramrod's completion.

However, in almost every configuration scenario the firmware is
able to comply and complete the ramrod in a matter of several
usecs. This is especially important in cases where a lot of
sequential configurations are applied to the hardware
[e.g., RoCE], where the existing scheme can cause user-visible
delays.

This patch changes the completion scheme - instead of immediately
starting to sleep for a 'long' period between polls, the driver
first busy-polls for the completion over a handful of short
iterations, a few usecs apart, and only falls back to the sleeping
poll if the ramrod has not completed by then.
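
As a rough illustration (a simplified sketch, not the actual driver
code; poll_done() and the QUICK_/SLEEP_POLL_* constants are made-up
placeholders here), the new wait logic amounts to:

	/* Simplified sketch of the two-phase wait; the real logic lives
	 * in qed_spq_block()/__qed_spq_block() in the diff below.
	 */
	static int wait_for_ramrod(struct qed_spq_entry *p_ent, u8 *fw_ret)
	{
		int i;

		/* Phase 1: short busy-wait poll, a few usecs per iteration */
		for (i = 0; i < QUICK_POLL_ITER; i++) {
			if (poll_done(p_ent, fw_ret))
				return 0;
			udelay(QUICK_POLL_US);
		}

		/* Phase 2: fall back to the original sleeping poll */
		for (i = 0; i < SLEEP_POLL_ITER; i++) {
			if (poll_done(p_ent, fw_ret))
				return 0;
			msleep(SLEEP_POLL_MS);
		}

		return -EBUSY;
	}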

Signed-off-by: Yuval Mintz <Yuval.Mintz@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/qlogic/qed/qed_spq.c

index caff41544898baed09f45a41829cb0ba9c719fb9..259a6156c761c0b498f3500e966d9f34ebb695a0 100644
 ***************************************************************************/
 
 #define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
-#define SPQ_BLOCK_SLEEP_LENGTH          (1000)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
+#define SPQ_BLOCK_DELAY_US              (10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
+#define SPQ_BLOCK_SLEEP_MS              (5)
 
 /***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
@@ -57,53 +61,81 @@ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
        smp_wmb();
 }
 
-static int qed_spq_block(struct qed_hwfn *p_hwfn,
-                        struct qed_spq_entry *p_ent,
-                        u8 *p_fw_ret)
+static int __qed_spq_block(struct qed_hwfn *p_hwfn,
+                          struct qed_spq_entry *p_ent,
+                          u8 *p_fw_ret, bool sleep_between_iter)
 {
-       int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct qed_spq_comp_done *comp_done;
-       int rc;
+       u32 iter_cnt;
 
        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
-       while (sleep_count) {
-               /* validate we receive completion update */
+       iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+                                     : SPQ_BLOCK_DELAY_MAX_ITER;
+
+       while (iter_cnt--) {
+               /* Validate we receive completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
-               usleep_range(5000, 10000);
-               sleep_count--;
+
+               if (sleep_between_iter)
+                       msleep(SPQ_BLOCK_SLEEP_MS);
+               else
+                       udelay(SPQ_BLOCK_DELAY_US);
        }
 
+       return -EBUSY;
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+                        struct qed_spq_entry *p_ent,
+                        u8 *p_fw_ret, bool skip_quick_poll)
+{
+       struct qed_spq_comp_done *comp_done;
+       int rc;
+
+       /* A relatively short polling period w/o sleeping, to allow the FW to
+        * complete the ramrod and thus possibly to avoid the following sleeps.
+        */
+       if (!skip_quick_poll) {
+               rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+               if (!rc)
+                       return 0;
+       }
+
+       /* Move to polling with a sleeping period between iterations */
+       rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+       if (!rc)
+               return 0;
+
        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
-       if (rc != 0)
+       if (rc) {
                DP_NOTICE(p_hwfn, "MCP drain failed\n");
+               goto err;
+       }
 
        /* Retry after drain */
-       sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
-       while (sleep_count) {
-               /* validate we receive completion update */
-               smp_rmb();
-               if (comp_done->done == 1) {
-                       if (p_fw_ret)
-                               *p_fw_ret = comp_done->fw_return_code;
-                       return 0;
-               }
-               usleep_range(5000, 10000);
-               sleep_count--;
-       }
+       rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+       if (!rc)
+               return 0;
 
+       comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return 0;
        }
-
-       DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+err:
+       DP_NOTICE(p_hwfn,
+                 "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+                 le32_to_cpu(p_ent->elem.hdr.cid),
+                 p_ent->elem.hdr.cmd_id,
+                 p_ent->elem.hdr.protocol_id,
+                 le16_to_cpu(p_ent->elem.hdr.echo));
 
        return -EBUSY;
 }
@@ -729,7 +761,8 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
-               rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+               rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
+                                  p_ent->queue == &p_spq->unlimited_pending);
 
                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
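
For a sense of the time budgets involved (a back-of-the-envelope
sketch derived only from the constants introduced above, not from
measurements; the SPQ_*_WORST_* macro names below are illustrative
only), the quick poll busy-waits for at most ~100us, while each
sleeping poll can take up to ~5 seconds before the ramrod is
declared stuck:

	/* Rough worst-case wait per phase, from the new defines */
	#define SPQ_QUICK_POLL_WORST_US \
		(SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US)	/* 10 * 10us  = 100us */
	#define SPQ_SLEEP_POLL_WORST_MS \
		(SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS)	/* 1000 * 5ms = 5s    */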