[linux-drm-fsl-dcu.git] arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dma_addr);

        return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_data.cputype == CPU_R10000 ||
                current_cpu_data.cputype == CPU_R12000);
}

void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
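
/*
 * Minimal usage sketch for the coherent allocator above (hypothetical
 * driver; "dev", "struct my_desc", NUM_DESC, "ring" and "ring_dma" are
 * illustrative names, not part of this file):
 *
 *      struct my_desc *ring;
 *      dma_addr_t ring_dma;
 *
 *      ring = dma_alloc_coherent(dev, NUM_DESC * sizeof(*ring),
 *                                &ring_dma, GFP_KERNEL);
 *      if (ring == NULL)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, NUM_DESC * sizeof(*ring), ring, ring_dma);
 */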

static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dma_addr), size,
                           direction);

        plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);
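
/*
 * Minimal streaming-mapping sketch for dma_map_single()/dma_unmap_single()
 * (hypothetical driver; "dev", "buf" and "len" are illustrative names,
 * not part of this file):
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ... hand "handle" to the device and wait for the transfer ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */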

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) page_address(sg->page);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr + sg->offset, sg->length, direction);
                sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
                                  sg->offset;
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                dma_cache_wback_inv(addr, size);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = plat_dma_addr_to_phys(dma_address);
                dma_cache_wback_inv(addr, size);
        }

        plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) page_address(sg->page);
                        if (addr)
                                __dma_sync(addr + sg->offset, sg->length,
                                           direction);
                }
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);
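
/*
 * Minimal scatter/gather sketch for dma_map_sg()/dma_unmap_sg()
 * (hypothetical driver; "dev", "sglist" and "nents" are illustrative
 * names, not part of this file):
 *
 *      int count;
 *
 *      count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *      ... program the device with the "count" mapped entries ...
 *      dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */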

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);
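
/*
 * Minimal buffer-reuse sketch for the sync_single helpers above
 * (hypothetical driver; reuses the "handle" and "len" from the
 * dma_map_single() sketch earlier):
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the CPU may now read the received data ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the device may DMA into the buffer again ...
 */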

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg->page),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg->page),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);
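
/*
 * Sketch of a driver-side mask check (hypothetical; "dev" is assumed).
 * Drivers normally reach dma_supported() indirectly via dma_set_mask():
 *
 *      if (dma_set_mask(dev, 0xffffffffULL))
 *              return -EIO;
 */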

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);