dmaengine: remove DMA unmap flags
Author:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
AuthorDate: Fri, 18 Oct 2013 17:35:33 +0000 (19:35 +0200)
Committer:  Dan Williams <dan.j.williams@intel.com>
CommitDate: Thu, 14 Nov 2013 19:04:38 +0000 (11:04 -0800)
Remove the DMA unmap flags, which are no longer needed now that the dmaengine
core handles DMA unmapping itself via struct dmaengine_unmap_data (a short
before/after sketch follows the sign-off block):
- DMA_COMPL_SKIP_SRC_UNMAP
- DMA_COMPL_SKIP_DEST_UNMAP
- DMA_COMPL_SRC_UNMAP_SINGLE
- DMA_COMPL_DEST_UNMAP_SINGLE

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Jon Mason <jon.mason@intel.com>
Acked-by: Mark Brown <broonie@linaro.org>
[djbw: clean up straggling skip unmap flags in ntb]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
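
The change is mechanical at every call site: the DMA_COMPL_SKIP_* bits are simply
dropped from the flags passed to the prep routines, as in this before/after sketch
(variable names are illustrative, not taken from any single driver below):

	/* before: clients opted out of the old completion-time unmapping */
	flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK |
		DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);

	/* after: only the prep flags remain */
	flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
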
18 files changed:
crypto/async_tx/async_memcpy.c
crypto/async_tx/async_pq.c
crypto/async_tx/async_raid6_recov.c
crypto/async_tx/async_xor.c
drivers/ata/pata_arasan_cf.c
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/ioat/dma.c
drivers/dma/ioat/dma_v3.c
drivers/media/platform/m2m-deinterlace.c
drivers/media/platform/timblogiw.c
drivers/misc/carma/carma-fpga.c
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/fsmc_nand.c
drivers/net/ethernet/micrel/ks8842.c
drivers/ntb/ntb_transport.c
drivers/spi/spi-dw-mid.c
include/linux/dmaengine.h

diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 72750214f7793d0002de21402fc3d076f555d948..f8c0b8dbeb7582beca1ee7fd5c7aaac58aba23cd 100644
@@ -56,8 +56,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
                unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
 
        if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
-               unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                              DMA_COMPL_SKIP_DEST_UNMAP;
+               unsigned long dma_prep_flags = 0;
 
                if (submit->cb_fn)
                        dma_prep_flags |= DMA_PREP_INTERRUPT;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 4126b56fbc01c664d64e5ffa43b98fef8857ee00..d05327caf69dbc18532478b122ea9834e67f1fd9 100644
@@ -62,7 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
        dma_addr_t dma_dest[2];
        int src_off = 0;
 
-       dma_flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
        if (submit->flags & ASYNC_TX_FENCE)
                dma_flags |= DMA_PREP_FENCE;
 
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index a3a72a784421fcece0f0468808a43b27a9d390e8..934a849814958e6ea37b9dbdb96abc820c4fe9e1 100644
@@ -47,9 +47,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
                struct device *dev = dma->dev;
                dma_addr_t pq[2];
                struct dma_async_tx_descriptor *tx;
-               enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                               DMA_COMPL_SKIP_DEST_UNMAP |
-                                               DMA_PREP_PQ_DISABLE_P;
+               enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
 
                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
@@ -113,9 +111,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
                dma_addr_t dma_dest[2];
                struct device *dev = dma->dev;
                struct dma_async_tx_descriptor *tx;
-               enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                               DMA_COMPL_SKIP_DEST_UNMAP |
-                                               DMA_PREP_PQ_DISABLE_P;
+               enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
 
                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index d2cc77d501c7951a61d84592190b58e5a2416c1d..3c562f5a60bbb34f19e6b90f4d858b04d3d496e2 100644
@@ -41,7 +41,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        void *cb_param_orig = submit->cb_param;
        enum async_tx_flags flags_orig = submit->flags;
-       enum dma_ctrl_flags dma_flags;
+       enum dma_ctrl_flags dma_flags = 0;
        int src_cnt = unmap->to_cnt;
        int xor_src_cnt;
        dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
@@ -55,7 +55,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
                /* if we are submitting additional xors, leave the chain open
                 * and clear the callback parameters
                 */
-               dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
                if (src_cnt > xor_src_cnt) {
                        submit->flags &= ~ASYNC_TX_ACK;
                        submit->flags |= ASYNC_TX_FENCE;
@@ -284,8 +283,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 
        if (unmap && src_cnt <= device->max_xor &&
            is_dma_xor_aligned(device, offset, 0, len)) {
-               unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                              DMA_COMPL_SKIP_DEST_UNMAP;
+               unsigned long dma_prep_flags = 0;
                int i;
 
                pr_debug("%s: (async) len: %zu\n", __func__, len);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 853f610af28fbc9dff0ee59d1fa1e6688cbfd732..e88690ebfd827b8a02558c4b611b474a4f8c4a1e 100644
@@ -396,8 +396,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan = acdev->dma_chan;
        dma_cookie_t cookie;
-       unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
-               DMA_COMPL_SKIP_DEST_UNMAP;
+       unsigned long flags = DMA_PREP_INTERRUPT;
        int ret = 0;
 
        tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index f878c808466e0b38c4c3347ab3ba6aaebd88c08e..b69ac3892b862862a682f96ed67249ef7190bb9e 100644
@@ -1065,8 +1065,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                      DMA_FROM_DEVICE);
        unmap->len = len;
-       flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
-               DMA_COMPL_SKIP_DEST_UNMAP;
+       flags = DMA_CTRL_ACK;
        tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
                                         len, flags);
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index f4a2a25fae31d7d5035d04993d16e20ecf2b5fe6..5791091c13ca36f8e40b406d232808c618328c13 100644
@@ -599,8 +599,7 @@ static int dmatest_func(void *data)
        /*
         * src and dst buffers are freed by ourselves below
         */
-       flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
-               DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
+       flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
        while (!kthread_should_stop()
               && !(params->iterations && total_tests >= params->iterations)) {
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c123e32dbbb03e0fc96c65adfddf5fa91c56a30d..6fcf741ad91b7023fb0b23c842d0728e492ae0d9 100644
@@ -818,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 
        dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-       flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
-               DMA_PREP_INTERRUPT;
+       flags = DMA_PREP_INTERRUPT;
        tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
                                                   IOAT_TEST_SIZE, flags);
        if (!tx) {
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 43386c171bba764b186424bb6a445d387896d007..a4798f0cc22527b4c4f5e5be88e900491f4d0e68 100644
@@ -1279,9 +1279,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                                           DMA_TO_DEVICE);
        tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                      IOAT_NUM_SRC_TEST, PAGE_SIZE,
-                                     DMA_PREP_INTERRUPT |
-                                     DMA_COMPL_SKIP_SRC_UNMAP |
-                                     DMA_COMPL_SKIP_DEST_UNMAP);
+                                     DMA_PREP_INTERRUPT);
 
        if (!tx) {
                dev_err(dev, "Self-test xor prep failed\n");
@@ -1342,9 +1340,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                                           DMA_TO_DEVICE);
        tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                          IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-                                         &xor_val_result, DMA_PREP_INTERRUPT |
-                                         DMA_COMPL_SKIP_SRC_UNMAP |
-                                         DMA_COMPL_SKIP_DEST_UNMAP);
+                                         &xor_val_result, DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(dev, "Self-test zero prep failed\n");
                err = -ENODEV;
@@ -1389,9 +1385,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                                           DMA_TO_DEVICE);
        tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                          IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-                                         &xor_val_result, DMA_PREP_INTERRUPT |
-                                         DMA_COMPL_SKIP_SRC_UNMAP |
-                                         DMA_COMPL_SKIP_DEST_UNMAP);
+                                         &xor_val_result, DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(dev, "Self-test 2nd zero prep failed\n");
                err = -ENODEV;
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 540516ca872c53a05d3bce4dbcf3cb9b3a87b4e3..879ea6fdd1be64ca2d280ed7316e47e320319843 100644
@@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
        ctx->xt->dir = DMA_MEM_TO_MEM;
        ctx->xt->src_sgl = false;
        ctx->xt->dst_sgl = true;
-       flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
-               DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;
+       flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
        tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags);
        if (tx == NULL) {
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index b557caf5b1a4791846361ebf8ac382c429025f18..59a95e3ab0e31bff666a2388dbf512e35eec9624 100644
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
 
        desc = dmaengine_prep_slave_sg(fh->chan,
                buf->sg, sg_elems, DMA_DEV_TO_MEM,
-               DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+               DMA_PREP_INTERRUPT);
        if (!desc) {
                spin_lock_irq(&fh->queue_lock);
                list_del_init(&vb->queue);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 7b56563f8b747dc87df208c5be260173c217153c..5335104e7c84b7fc9f91b2e74c2be0895b5d4c1f 100644
@@ -631,8 +631,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
        dma_addr_t dst, src;
-       unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP |
-                                 DMA_COMPL_SKIP_SRC_UNMAP;
+       unsigned long dma_flags = 0;
 
        dst_sg = buf->vb.sglist;
        dst_nents = buf->vb.sglen;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 060feeaf6b3e5554328904a1ac870a97cbc5685b..2a837cb425d76b056a8a14b801a772d76b6746f7 100644
@@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
 
        dma_dev = host->dma_chan->device;
 
-       flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
-               DMA_COMPL_SKIP_DEST_UNMAP;
+       flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
        phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
        if (dma_mapping_error(dma_dev->dev, phys_addr)) {
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 3dc1a7564d8725d62085b16cb7c0544e138858b2..8b2752263db9a5549742bb36c3dcee48999b8b62 100644
@@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
        dma_dev = chan->device;
        dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
 
-       flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
-
        if (direction == DMA_TO_DEVICE) {
                dma_src = dma_addr;
                dma_dst = host->data_pa;
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 0951f7aca1eff6671f187d03b203952926a1ecea..822616e3c3754118ab2e09eada44d59a24c3954c 100644
@@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
                sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
 
        ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
-               &ctl->sg, 1, DMA_MEM_TO_DEV,
-               DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+               &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!ctl->adesc)
                return NETDEV_TX_BUSY;
 
@@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
                sg_dma_len(sg) = DMA_BUFFER_SIZE;
 
                ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
-                       sg, 1, DMA_DEV_TO_MEM,
-                       DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+                       sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 
                if (!ctl->adesc)
                        goto out;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 222c2baa3a4b6a474e134a06dce4d2740a02e476..d0222f13d154808f3cfa4ed3cab26cd1a7b899ee 100644
@@ -1037,7 +1037,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        void *buf = entry->buf;
-       unsigned long flags;
 
        entry->len = len;
 
@@ -1073,10 +1072,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 
        unmap->from_cnt = 1;
 
-       flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
-               DMA_PREP_INTERRUPT;
        txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
-                                            unmap->addr[0], len, flags);
+                                            unmap->addr[0], len,
+                                            DMA_PREP_INTERRUPT);
        if (!txd)
                goto err_get_unmap;
 
@@ -1266,7 +1264,6 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
        void __iomem *offset;
        size_t len = entry->len;
        void *buf = entry->buf;
-       unsigned long flags;
 
        offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
        hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
@@ -1301,10 +1298,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 
        unmap->to_cnt = 1;
 
-       flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
-               DMA_PREP_INTERRUPT;
        txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
-                                           flags);
+                                            DMA_PREP_INTERRUPT);
        if (!txd)
                goto err_get_unmap;
 
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index b9f0192758d6d929aab86d087c443adc46154e66..6d207afec8cbdb578c9e5428d6018dff1d93690b 100644
@@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
                                &dws->tx_sgl,
                                1,
                                DMA_MEM_TO_DEV,
-                               DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+                               DMA_PREP_INTERRUPT);
        txdesc->callback = dw_spi_dma_done;
        txdesc->callback_param = dws;
 
@@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
                                &dws->rx_sgl,
                                1,
                                DMA_DEV_TO_MEM,
-                               DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+                               DMA_PREP_INTERRUPT);
        rxdesc->callback = dw_spi_dma_done;
        rxdesc->callback_param = dws;
 
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 3782cdb782a85ebeb1d93d312ec14c5c06e4dfa5..491072cb5ba01c9702b5dca691c55d1ffd01a003 100644
@@ -171,12 +171,6 @@ struct dma_interleaved_template {
  * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  *  acknowledges receipt, i.e. has has a chance to establish any dependency
  *  chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- *     (if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- *     (if not set, do the destination dma-unmapping as page)
  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
  * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +182,10 @@ struct dma_interleaved_template {
 enum dma_ctrl_flags {
        DMA_PREP_INTERRUPT = (1 << 0),
        DMA_CTRL_ACK = (1 << 1),
-       DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
-       DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
-       DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
-       DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
-       DMA_PREP_PQ_DISABLE_P = (1 << 6),
-       DMA_PREP_PQ_DISABLE_Q = (1 << 7),
-       DMA_PREP_CONTINUE = (1 << 8),
-       DMA_PREP_FENCE = (1 << 9),
+       DMA_PREP_PQ_DISABLE_P = (1 << 2),
+       DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+       DMA_PREP_CONTINUE = (1 << 4),
+       DMA_PREP_FENCE = (1 << 5),
 };
 
 /**
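
With the flags gone, unmapping is driven entirely by struct dmaengine_unmap_data,
as the dmaengine.c and ntb_transport.c hunks above already show. A minimal sketch
of that pattern (names such as dev, chan, src_pg and dst_pg are illustrative, and
error handling is trimmed; this is not code from the patch):

	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	/* slot 0: source (to device), slot 1: destination (from device) */
	unmap->addr[0] = dma_map_page(dev, src_pg, src_off, len, DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dev, dst_pg, dst_off, len, DMA_FROM_DEVICE);
	unmap->to_cnt = 1;
	unmap->from_cnt = 1;
	unmap->len = len;

	tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
						  unmap->addr[0], len,
						  DMA_PREP_INTERRUPT);
	if (tx)
		dma_set_unmap(tx, unmap);	/* core unmaps on completion */

	dmaengine_unmap_put(unmap);		/* drop the local reference */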