Merge tag 'ntb-3.13' of git://github.com/jonmason/ntb
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index d0222f13d154808f3cfa4ed3cab26cd1a7b899ee..3217f394d45b106051b282f824be1413b5efa65d 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,6 @@ struct ntb_transport_qp {
 
        void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
                            void *data, int len);
-       struct tasklet_struct rx_work;
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
        spinlock_t ntb_rx_pend_q_lock;
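
Dropping the rx_work member goes hand in hand with the later hunks that remove tasklet_init(), tasklet_schedule() and tasklet_disable(): receive processing moves out of softirq context and into the doorbell callback itself. For reference, a minimal sketch of the tasklet plumbing being retired (the demo_* names are illustrative, not part of the driver):

#include <linux/interrupt.h>

struct demo_qp {
        struct tasklet_struct rx_work;
};

static void demo_rx(unsigned long data)
{
        struct demo_qp *qp = (struct demo_qp *)data;

        /* drain the receive ring for this queue in softirq context */
        (void)qp;
}

static void demo_setup(struct demo_qp *qp)
{
        tasklet_init(&qp->rx_work, demo_rx, (unsigned long)qp);
}

static void demo_doorbell(struct demo_qp *qp)
{
        tasklet_schedule(&qp->rx_work);         /* defer the RX drain to softirq */
}

static void demo_teardown(struct demo_qp *qp)
{
        tasklet_disable(&qp->rx_work);          /* quiesce before freeing the queue */
}

The remaining hunks of this diff remove each of these calls in turn.
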
@@ -584,11 +583,8 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
        return 0;
 }
 
-static void ntb_qp_link_cleanup(struct work_struct *work)
+static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
 {
-       struct ntb_transport_qp *qp = container_of(work,
-                                                  struct ntb_transport_qp,
-                                                  link_cleanup);
        struct ntb_transport *nt = qp->transport;
        struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
 
@@ -602,6 +598,16 @@ static void ntb_qp_link_cleanup(struct work_struct *work)
 
        dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
        qp->qp_link = NTB_LINK_DOWN;
+}
+
+static void ntb_qp_link_cleanup_work(struct work_struct *work)
+{
+       struct ntb_transport_qp *qp = container_of(work,
+                                                  struct ntb_transport_qp,
+                                                  link_cleanup);
+       struct ntb_transport *nt = qp->transport;
+
+       ntb_qp_link_cleanup(qp);
 
        if (nt->transport_link == NTB_LINK_UP)
                schedule_delayed_work(&qp->link_work,
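
The split above follows a common kernel refactoring: the body of the work handler becomes a plain helper that takes the object directly, and a thin wrapper with the work_func_t prototype recovers that object via container_of() for the asynchronous path. A minimal sketch of the idiom, with hypothetical demo_* names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_obj {
        struct work_struct link_cleanup;
};

static void demo_link_cleanup(struct demo_obj *obj)
{
        /* teardown that can now also be called synchronously */
}

static void demo_link_cleanup_work(struct work_struct *work)
{
        struct demo_obj *obj = container_of(work, struct demo_obj, link_cleanup);

        demo_link_cleanup(obj);
}

static void demo_setup(struct demo_obj *obj)
{
        INIT_WORK(&obj->link_cleanup, demo_link_cleanup_work); /* queued with schedule_work() */
}

The same treatment is applied to the transport-level cleanup in the next hunks, and the INIT_WORK() call sites are rewired to the *_work wrappers further down.
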
@@ -613,22 +619,20 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp)
        schedule_work(&qp->link_cleanup);
 }
 
-static void ntb_transport_link_cleanup(struct work_struct *work)
+static void ntb_transport_link_cleanup(struct ntb_transport *nt)
 {
-       struct ntb_transport *nt = container_of(work, struct ntb_transport,
-                                               link_cleanup);
        int i;
 
+       /* Pass along the info to any clients */
+       for (i = 0; i < nt->max_qps; i++)
+               if (!test_bit(i, &nt->qp_bitmap))
+                       ntb_qp_link_cleanup(&nt->qps[i]);
+
        if (nt->transport_link == NTB_LINK_DOWN)
                cancel_delayed_work_sync(&nt->link_work);
        else
                nt->transport_link = NTB_LINK_DOWN;
 
-       /* Pass along the info to any clients */
-       for (i = 0; i < nt->max_qps; i++)
-               if (!test_bit(i, &nt->qp_bitmap))
-                       ntb_qp_link_down(&nt->qps[i]);
-
        /* The scratchpad registers keep the values if the remote side
         * goes down, blast them now to give them a sane value the next
         * time they are accessed
@@ -637,6 +641,14 @@ static void ntb_transport_link_cleanup(struct work_struct *work)
                ntb_write_local_spad(nt->ndev, i, 0);
 }
 
+static void ntb_transport_link_cleanup_work(struct work_struct *work)
+{
+       struct ntb_transport *nt = container_of(work, struct ntb_transport,
+                                               link_cleanup);
+
+       ntb_transport_link_cleanup(nt);
+}
+
 static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
 {
        struct ntb_transport *nt = data;
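
With the transport-level cleanup split the same way, the per-queue notification loop also moves to the top of the helper and calls ntb_qp_link_cleanup() directly instead of ntb_qp_link_down(), which would only queue more work. A hedged restatement of the new helper's shape, with the implicit semantics spelled out as comments (field names are the ones visible in this diff; judging by the set_bit() in the create_queue error path below, a set bit in qp_bitmap marks a free queue, so !test_bit() selects queues in use):

static void demo_transport_cleanup(struct ntb_transport *nt)
{
        int i;

        /* Notify every in-use queue synchronously, before the transport
         * link state is torn down; nothing is left sitting on a workqueue. */
        for (i = 0; i < nt->max_qps; i++)
                if (!test_bit(i, &nt->qp_bitmap))
                        ntb_qp_link_cleanup(&nt->qps[i]);
}

Running this synchronously presumably matters most for teardown: ntb_transport_free(), later in this diff, now calls ntb_transport_link_cleanup() directly rather than only setting transport_link to NTB_LINK_DOWN.
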
@@ -880,7 +892,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
        }
 
        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
-       INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
+       INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
        spin_lock_init(&qp->ntb_rx_pend_q_lock);
        spin_lock_init(&qp->ntb_rx_free_q_lock);
@@ -936,7 +948,7 @@ int ntb_transport_init(struct pci_dev *pdev)
        }
 
        INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
-       INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
+       INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
 
        rc = ntb_register_event_callback(nt->ndev,
                                         ntb_transport_event_callback);
@@ -972,7 +984,7 @@ void ntb_transport_free(void *transport)
        struct ntb_device *ndev = nt->ndev;
        int i;
 
-       nt->transport_link = NTB_LINK_DOWN;
+       ntb_transport_link_cleanup(nt);
 
        /* verify that all the qp's are freed */
        for (i = 0; i < nt->max_qps; i++) {
@@ -1188,11 +1200,14 @@ err:
        goto out;
 }
 
-static void ntb_transport_rx(unsigned long data)
+static int ntb_transport_rxc_db(void *data, int db_num)
 {
-       struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
+       struct ntb_transport_qp *qp = data;
        int rc, i;
 
+       dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
+               __func__, db_num);
+
        /* Limit the number of packets processed in a single interrupt to
         * provide fairness to others
         */
@@ -1204,16 +1219,8 @@ static void ntb_transport_rx(unsigned long data)
 
        if (qp->dma_chan)
                dma_async_issue_pending(qp->dma_chan);
-}
-
-static void ntb_transport_rxc_db(void *data, int db_num)
-{
-       struct ntb_transport_qp *qp = data;
-
-       dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
-               __func__, db_num);
 
-       tasklet_schedule(&qp->rx_work);
+       return i;
 }
 
 static void ntb_tx_copy_callback(void *data)
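
Folding the tasklet body into ntb_transport_rxc_db() leaves the doorbell callback with roughly the shape sketched below: the receive ring is drained inline, bounded by the pre-existing fairness limit, and the number of packets handled is returned to the hardware layer. The loop itself sits between the two hunks above and is unchanged; its field and helper names (rx_max_entry, ntb_process_rxc()) and the use made of the return value are assumptions about the surrounding code, not something these hunks show.

static int demo_rxc_db(void *data, int db_num)
{
        struct ntb_transport_qp *qp = data;
        int i;

        dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
                __func__, db_num);

        /* Bounded drain: at most rx_max_entry packets per doorbell, so one
         * busy queue cannot monopolize this context. */
        for (i = 0; i < qp->rx_max_entry; i++)
                if (ntb_process_rxc(qp))
                        break;

        if (qp->dma_chan)
                dma_async_issue_pending(qp->dma_chan);  /* kick any DMA copies queued above */

        return i;       /* presumably tells the caller whether any packets were handled */
}
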
@@ -1432,11 +1439,12 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
        qp->tx_handler = handlers->tx_handler;
        qp->event_handler = handlers->event_handler;
 
+       dmaengine_get();
        qp->dma_chan = dma_find_channel(DMA_MEMCPY);
-       if (!qp->dma_chan)
+       if (!qp->dma_chan) {
+               dmaengine_put();
                dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
-       else
-               dmaengine_get();
+       }
 
        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
                entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
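
The reordering above matches the dmaengine client contract: dma_find_channel() reads a per-CPU channel table that is only kept populated while at least one client holds a reference from dmaengine_get(), so taking the reference after the lookup (as the old code did) risks finding no channel even when one exists, and the reference has to be dropped again when the driver falls back to CPU copies. A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/dmaengine.h>

static struct dma_chan *demo_grab_memcpy_chan(void)
{
        struct dma_chan *chan;

        dmaengine_get();                        /* register as a dmaengine client first */
        chan = dma_find_channel(DMA_MEMCPY);    /* NULL when no public memcpy channel exists */
        if (!chan)
                dmaengine_put();                /* keep get/put balanced on the CPU-only path */

        return chan;
}

The matching release in the error path below is made conditional on qp->dma_chan for the same reason: the reference has already been dropped when no channel was found.
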
@@ -1458,25 +1466,23 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
                             &qp->tx_free_q);
        }
 
-       tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
-
        rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
                                      ntb_transport_rxc_db);
        if (rc)
-               goto err3;
+               goto err2;
 
        dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
 
        return qp;
 
-err3:
-       tasklet_disable(&qp->rx_work);
 err2:
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
 err1:
        while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
                kfree(entry);
+       if (qp->dma_chan)
+               dmaengine_put();
        set_bit(free_queue, &nt->qp_bitmap);
 err:
        return NULL;
@@ -1515,7 +1521,6 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
        }
 
        ntb_unregister_db_callback(qp->ndev, qp->qp_num);
-       tasklet_disable(&qp->rx_work);
 
        cancel_delayed_work_sync(&qp->link_work);