dma-mapping: turn dma_cache_sync into a dma_map_ops method
authorChristoph Hellwig <hch@lst.de>
Sun, 27 Aug 2017 08:37:15 +0000 (10:37 +0200)
committerChristoph Hellwig <hch@lst.de>
Thu, 19 Oct 2017 14:37:49 +0000 (16:37 +0200)
After we removed all the dead wood it turns out only two architectures
actually implement dma_cache_sync as a real op: mips and parisc.  Add
a cache_sync method to struct dma_map_ops and implement it for the
mips default DMA ops, and the parisc pa11 ops.

Note that arm, arc and openrisc support DMA_ATTR_NON_CONSISTENT, but
never provided a functional dma_cache_sync implementation, which
seems somewhat odd.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
24 files changed:
arch/alpha/include/asm/dma-mapping.h
arch/cris/include/asm/dma-mapping.h
arch/frv/include/asm/dma-mapping.h
arch/hexagon/include/asm/dma-mapping.h
arch/ia64/include/asm/dma-mapping.h
arch/m32r/include/asm/dma-mapping.h
arch/m68k/include/asm/dma-mapping.h
arch/metag/include/asm/dma-mapping.h
arch/microblaze/include/asm/dma-mapping.h
arch/mips/include/asm/dma-mapping.h
arch/mips/mm/dma-default.c
arch/mn10300/include/asm/dma-mapping.h
arch/nios2/include/asm/dma-mapping.h
arch/parisc/include/asm/dma-mapping.h
arch/parisc/kernel/pci-dma.c
arch/powerpc/include/asm/dma-mapping.h
arch/s390/include/asm/dma-mapping.h
arch/sh/include/asm/dma-mapping.h
arch/sparc/include/asm/dma-mapping.h
arch/tile/include/asm/dma-mapping.h
arch/unicore32/include/asm/dma-mapping.h
arch/x86/include/asm/dma-mapping.h
arch/xtensa/include/asm/dma-mapping.h
include/linux/dma-mapping.h

index 5d53666935e6bacf733bb4c03d46c1c241618ff0..399a4f49355eb00e6b6db05da27f723be87ebbba 100644 (file)
@@ -8,6 +8,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return dma_ops;
 }
 
-#define dma_cache_sync(dev, va, size, dir)               ((void)0)
-
 #endif /* _ALPHA_DMA_MAPPING_H */
index 256169de3743defd576df3e5f119359e228a991a..e30adde42beb25dafe76b9164110fea7d6356637 100644 (file)
@@ -16,10 +16,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 }
 #endif
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-              enum dma_data_direction direction)
-{
-}
-
 #endif
index da0e5c9744c4cd88c27b144a88d8d7e3c9d614cc..da24ae943f028d3671b5364b29ff4edc1f96ba42 100644 (file)
@@ -14,10 +14,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return &frv_dma_ops;
 }
 
-static inline
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                   enum dma_data_direction direction)
-{
-}
-
 #endif  /* _ASM_DMA_MAPPING_H */
index 463dbc18f853e00040cf2bc7a2a65f1f55b96016..5208de242e794b2c3045490254044348e0de6cdd 100644 (file)
@@ -37,9 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return dma_ops;
 }
 
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                          enum dma_data_direction direction);
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
        if (!dev->dma_mask)
index 99dfc1aa9d3ce2c27b8168ae862daaa0e23d3182..9e5b5df76ff8bb22086ed4110bf69e2e36a95e77 100644 (file)
@@ -44,10 +44,4 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
        return daddr;
 }
 
-static inline void
-dma_cache_sync (struct device *dev, void *vaddr, size_t size,
-       enum dma_data_direction dir)
-{
-}
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
index aff3ae8b62f76286c6739b63acc995f3584c971a..9e993daed7a063389a1fc92247b015d2b49ff64b 100644 (file)
@@ -13,11 +13,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return &dma_noop_ops;
 }
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                                 enum dma_data_direction direction)
-{
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
        if (!dev->dma_mask)
index 9210e470771bd43e5a7be7a8df08f9c05487713b..9a0d559fcc138a75f5c855a693b33251037883a9 100644 (file)
@@ -8,10 +8,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
         return &m68k_dma_ops;
 }
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                                 enum dma_data_direction dir)
-{
-       /* we use coherent allocation, so not much to do here. */
-}
-
 #endif  /* _M68K_DMA_MAPPING_H */
index ea573be2b6d0550e46a170c8a72178953022f4f3..340265dcf839ba950de9ac457f4fc856941ce451 100644 (file)
@@ -8,14 +8,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return &metag_dma_ops;
 }
 
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-              enum dma_data_direction direction)
-{
-}
-
 #endif
index ad448e4aedb6b799980d360c0ab49cd68eeb053d..6b9ea39405b806bb6f8771f6719b5d67b398e947 100644 (file)
@@ -25,9 +25,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return &dma_direct_ops;
 }
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-               enum dma_data_direction direction)
-{
-}
-
 #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
index aba71385f9d15afcbdd82262df9b905c69ef9c22..6ea1439430a2d6e44967ce25dc92ba073a95e6d1 100644 (file)
@@ -26,9 +26,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-              enum dma_data_direction direction);
-
 #define arch_setup_dma_ops arch_setup_dma_ops
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                                      u64 size, const struct iommu_ops *iommu,
index c01bd20d020810901b9712dc652f283d37d976b3..2e2514e0072068685e63ca3df2c01e09f2af4a81 100644 (file)
@@ -383,7 +383,7 @@ static int mips_dma_supported(struct device *dev, u64 mask)
        return plat_dma_supported(dev, mask);
 }
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                         enum dma_data_direction direction)
 {
        BUG_ON(direction == DMA_NONE);
@@ -392,8 +392,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                __dma_sync_virtual(vaddr, size, direction);
 }
 
-EXPORT_SYMBOL(dma_cache_sync);
-
 static const struct dma_map_ops mips_default_dma_map_ops = {
        .alloc = mips_dma_alloc_coherent,
        .free = mips_dma_free_coherent,
@@ -407,7 +405,8 @@ static const struct dma_map_ops mips_default_dma_map_ops = {
        .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
        .sync_sg_for_device = mips_dma_sync_sg_for_device,
        .mapping_error = mips_dma_mapping_error,
-       .dma_supported = mips_dma_supported
+       .dma_supported = mips_dma_supported,
+       .cache_sync = mips_dma_cache_sync,
 };
 
 const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
index dc24163b190f46f2483d018d56ec2a57da6255f6..439e474ed6d756768d035d1d8e7146122fab9f2e 100644 (file)
@@ -18,10 +18,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return &mn10300_dma_ops;
 }
 
-static inline
-void dma_cache_sync(void *vaddr, size_t size,
-                   enum dma_data_direction direction)
-{
-}
-
 #endif
index f8dc622227414abf8852c5c0718c2f832512eb50..6ceb92251da0fae2ab2010d75d78c07bf5057038 100644 (file)
@@ -17,13 +17,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return &nios2_dma_ops;
 }
 
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                                 enum dma_data_direction direction)
-{
-}
-
 #endif /* _ASM_NIOS2_DMA_MAPPING_H */
index 2b16282add691d15f5a6578e16f9b8fc395f1257..cb26bbd71d8a2b5da080e112f29839605eba7ec3 100644 (file)
@@ -32,14 +32,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return hppa_dma_ops;
 }
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-              enum dma_data_direction direction)
-{
-       if (hppa_dma_ops->sync_single_for_cpu)
-               flush_kernel_dcache_range((unsigned long)vaddr, size);
-}
-
 static inline void *
 parisc_walk_tree(struct device *dev)
 {
index bd4c0a7471d30b1c3411d372ef027bce24476f08..f87c34cb3b439e8a0a1437e71de58c1a9546dfb8 100644 (file)
@@ -571,6 +571,12 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
                flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }
 
+static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+              enum dma_data_direction direction)
+{
+       flush_kernel_dcache_range((unsigned long)vaddr, size);
+}
+
 const struct dma_map_ops pcxl_dma_ops = {
        .dma_supported =        pa11_dma_supported,
        .alloc =                pa11_dma_alloc,
@@ -583,6 +589,7 @@ const struct dma_map_ops pcxl_dma_ops = {
        .sync_single_for_device = pa11_dma_sync_single_for_device,
        .sync_sg_for_cpu =      pa11_dma_sync_sg_for_cpu,
        .sync_sg_for_device =   pa11_dma_sync_sg_for_device,
+       .cache_sync =           pa11_dma_cache_sync,
 };
 
 static void *pcx_dma_alloc(struct device *dev, size_t size,
@@ -619,4 +626,5 @@ const struct dma_map_ops pcx_dma_ops = {
        .sync_single_for_device = pa11_dma_sync_single_for_device,
        .sync_sg_for_cpu =      pa11_dma_sync_sg_for_cpu,
        .sync_sg_for_device =   pa11_dma_sync_sg_for_device,
+       .cache_sync =           pa11_dma_cache_sync,
 };
index 320846442bfb0cb311f0b125880d6b844c0f1287..2e43c2ef76323add0f040cd44996100cc7706d6e 100644 (file)
@@ -141,10 +141,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 #define ARCH_HAS_DMA_MMAP_COHERENT
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-               enum dma_data_direction direction)
-{
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_DMA_MAPPING_H */
index 512ad0eaa11a7f1d4e5f9ed3837f4b6962f2d0be..b17304b13de5f077c30492930c651333d5531846 100644 (file)
@@ -15,11 +15,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return &dma_noop_ops;
 }
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                                 enum dma_data_direction direction)
-{
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
        if (!dev->dma_mask)
index b46194ecef1734be5af32c4f6cbe9c7365f6ce8a..e89df111c017803a9d15d9fb3b6d98dcbe85e0e2 100644 (file)
@@ -9,12 +9,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return dma_ops;
 }
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                   enum dma_data_direction dir)
-{
-}
-
-/* arch/sh/mm/consistent.c */
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag,
                                        unsigned long attrs);
index 60bf1633d5541757af6165c4c6413226a369ba98..b298ed45cb239fe7fc83b1287f1b9f16af8eba54 100644 (file)
@@ -5,14 +5,6 @@
 #include <linux/mm.h>
 #include <linux/dma-debug.h>
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                                 enum dma_data_direction dir)
-{
-       /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
-        * routine can be a nop.
-        */
-}
-
 extern const struct dma_map_ops *dma_ops;
 extern const struct dma_map_ops pci32_dma_ops;
 
index 7061dc8af43af0681208f0fef5b769a99348f346..97ad6287829045b7d0f465f8ca5c0f11324e2e4d 100644 (file)
@@ -67,13 +67,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 #define HAVE_ARCH_DMA_SET_MASK 1
 int dma_set_mask(struct device *dev, u64 mask);
 
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                                 enum dma_data_direction direction)
-{
-}
-
 #endif /* _ASM_TILE_DMA_MAPPING_H */
index e949855bb7944ceda4cba2e729f3a210ccd225d7..ac608c2f6af6a8d713d091018b0bf7fb3b41324b 100644 (file)
@@ -45,10 +45,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr,
-               size_t size, enum dma_data_direction direction)
-{
-}
-
 #endif /* __KERNEL__ */
 #endif
index c6d3367be916842907d36c62ca451fe61088de90..c26747340ba5be244cdaa17ea988ed8aa9ab5b2e 100644 (file)
@@ -67,12 +67,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 }
 #endif /* CONFIG_X86_DMA_REMAP */
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-       enum dma_data_direction dir)
-{
-}
-
 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
 {
index 353e0314d6ba39aa4c031faf016760353e89c373..153bf23709885e56b27d050f2445510a8a5dc05e 100644 (file)
@@ -23,11 +23,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
        return &xtensa_dma_map_ops;
 }
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                   enum dma_data_direction direction)
-{
-}
-
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
        return (dma_addr_t)paddr;
index 29ce9815da87fcf436405fff2c1bdfddd6847e4b..028a375d240d8a64aec1e0e881a88f4b287438e5 100644 (file)
@@ -126,6 +126,8 @@ struct dma_map_ops {
        void (*sync_sg_for_device)(struct device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction dir);
+       void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+                       enum dma_data_direction direction);
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
        int (*dma_supported)(struct device *dev, u64 mask);
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
@@ -436,6 +438,17 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
 
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+               enum dma_data_direction dir)
+{
+       const struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       if (ops->cache_sync)
+               ops->cache_sync(dev, vaddr, size, dir);
+}
+
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, dma_addr_t dma_addr, size_t size);