Merge remote-tracking branches 'asoc/fix/adsp', 'asoc/fix/arizona', 'asoc/fix/atmel...
[linux-drm-fsl-dcu.git] / drivers / media / v4l2-core / videobuf2-dma-sg.c
1 /*
2  * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
3  *
4  * Copyright (C) 2010 Samsung Electronics
5  *
6  * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation.
11  */
12
13 #include <linux/module.h>
14 #include <linux/mm.h>
15 #include <linux/scatterlist.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19
20 #include <media/videobuf2-core.h>
21 #include <media/videobuf2-memops.h>
22 #include <media/videobuf2-dma-sg.h>
23
/* Debug verbosity: messages at level <= debug are printed (0 = silent). */
static int debug;
module_param(debug, int, 0644);

/* Prefixed debug printk, gated on the module's "debug" parameter. */
#define dprintk(level, fmt, arg...)                                     \
        do {                                                            \
                if (debug >= level)                                     \
                        printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);  \
        } while (0)

/* Per-buffer state for the DMA scatter/gather allocator. */
struct vb2_dma_sg_buf {
        void                            *vaddr;      /* lazily created kernel mapping, NULL until vaddr() */
        struct page                     **pages;     /* array of num_pages individual page pointers */
        int                             write;       /* USERPTR only: pages may be written, dirtied on release */
        int                             offset;      /* USERPTR only: sub-page offset of the user address */
        struct sg_table                 sg_table;    /* sg description of pages[], handed out via cookie() */
        size_t                          size;        /* buffer size in bytes */
        unsigned int                    num_pages;   /* number of entries in pages[] */
        atomic_t                        refcount;    /* users; buffer freed when it drops to zero */
        struct vb2_vmarea_handler       handler;     /* common mmap open/close refcount hooks */
};

static void vb2_dma_sg_put(void *buf_priv);
46
/*
 * Allocate buf->size bytes of zeroed memory as physically-contiguous chunks,
 * trying the largest page order first and falling back to smaller orders on
 * failure. High-order chunks are split so buf->pages[] always holds pointers
 * to individual 0-order pages.
 *
 * Returns 0 on success, or -ENOMEM after freeing everything allocated so far.
 *
 * NOTE(review): assumes buf->size is a multiple of PAGE_SIZE (the alloc()
 * caller guarantees this); a sub-page size would make `order` underflow.
 */
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
                gfp_t gfp_flags)
{
        unsigned int last_page = 0;
        int size = buf->size;

        while (size > 0) {
                struct page *pages;
                int order;
                int i;

                order = get_order(size);
                /* Don't over allocate: step down to an order that fits */
                if ((PAGE_SIZE << order) > size)
                        order--;

                pages = NULL;
                while (!pages) {
                        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
                                        __GFP_NOWARN | gfp_flags, order);
                        if (pages)
                                break;

                        if (order == 0) {
                                /* even single pages failed: undo and bail */
                                while (last_page--)
                                        __free_page(buf->pages[last_page]);
                                return -ENOMEM;
                        }
                        /* retry with a smaller contiguous chunk */
                        order--;
                }

                /* make each page of the chunk independently freeable */
                split_page(pages, order);
                for (i = 0; i < (1 << order); i++)
                        buf->pages[last_page++] = &pages[i];

                size -= PAGE_SIZE << order;
        }

        return 0;
}
87
88 static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
89 {
90         struct vb2_dma_sg_buf *buf;
91         int ret;
92         int num_pages;
93
94         buf = kzalloc(sizeof *buf, GFP_KERNEL);
95         if (!buf)
96                 return NULL;
97
98         buf->vaddr = NULL;
99         buf->write = 0;
100         buf->offset = 0;
101         buf->size = size;
102         /* size is already page aligned */
103         buf->num_pages = size >> PAGE_SHIFT;
104
105         buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
106                              GFP_KERNEL);
107         if (!buf->pages)
108                 goto fail_pages_array_alloc;
109
110         ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
111         if (ret)
112                 goto fail_pages_alloc;
113
114         ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
115                         buf->num_pages, 0, size, gfp_flags);
116         if (ret)
117                 goto fail_table_alloc;
118
119         buf->handler.refcount = &buf->refcount;
120         buf->handler.put = vb2_dma_sg_put;
121         buf->handler.arg = buf;
122
123         atomic_inc(&buf->refcount);
124
125         dprintk(1, "%s: Allocated buffer of %d pages\n",
126                 __func__, buf->num_pages);
127         return buf;
128
129 fail_table_alloc:
130         num_pages = buf->num_pages;
131         while (num_pages--)
132                 __free_page(buf->pages[num_pages]);
133 fail_pages_alloc:
134         kfree(buf->pages);
135 fail_pages_array_alloc:
136         kfree(buf);
137         return NULL;
138 }
139
140 static void vb2_dma_sg_put(void *buf_priv)
141 {
142         struct vb2_dma_sg_buf *buf = buf_priv;
143         int i = buf->num_pages;
144
145         if (atomic_dec_and_test(&buf->refcount)) {
146                 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
147                         buf->num_pages);
148                 if (buf->vaddr)
149                         vm_unmap_ram(buf->vaddr, buf->num_pages);
150                 sg_free_table(&buf->sg_table);
151                 while (--i >= 0)
152                         __free_page(buf->pages[i]);
153                 kfree(buf->pages);
154                 kfree(buf);
155         }
156 }
157
158 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
159                                     unsigned long size, int write)
160 {
161         struct vb2_dma_sg_buf *buf;
162         unsigned long first, last;
163         int num_pages_from_user;
164
165         buf = kzalloc(sizeof *buf, GFP_KERNEL);
166         if (!buf)
167                 return NULL;
168
169         buf->vaddr = NULL;
170         buf->write = write;
171         buf->offset = vaddr & ~PAGE_MASK;
172         buf->size = size;
173
174         first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
175         last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
176         buf->num_pages = last - first + 1;
177
178         buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
179                              GFP_KERNEL);
180         if (!buf->pages)
181                 goto userptr_fail_alloc_pages;
182
183         num_pages_from_user = get_user_pages(current, current->mm,
184                                              vaddr & PAGE_MASK,
185                                              buf->num_pages,
186                                              write,
187                                              1, /* force */
188                                              buf->pages,
189                                              NULL);
190
191         if (num_pages_from_user != buf->num_pages)
192                 goto userptr_fail_get_user_pages;
193
194         if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
195                         buf->num_pages, buf->offset, size, 0))
196                 goto userptr_fail_alloc_table_from_pages;
197
198         return buf;
199
200 userptr_fail_alloc_table_from_pages:
201 userptr_fail_get_user_pages:
202         dprintk(1, "get_user_pages requested/got: %d/%d]\n",
203                num_pages_from_user, buf->num_pages);
204         while (--num_pages_from_user >= 0)
205                 put_page(buf->pages[num_pages_from_user]);
206         kfree(buf->pages);
207 userptr_fail_alloc_pages:
208         kfree(buf);
209         return NULL;
210 }
211
212 /*
213  * @put_userptr: inform the allocator that a USERPTR buffer will no longer
214  *               be used
215  */
216 static void vb2_dma_sg_put_userptr(void *buf_priv)
217 {
218         struct vb2_dma_sg_buf *buf = buf_priv;
219         int i = buf->num_pages;
220
221         dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
222                __func__, buf->num_pages);
223         if (buf->vaddr)
224                 vm_unmap_ram(buf->vaddr, buf->num_pages);
225         sg_free_table(&buf->sg_table);
226         while (--i >= 0) {
227                 if (buf->write)
228                         set_page_dirty_lock(buf->pages[i]);
229                 put_page(buf->pages[i]);
230         }
231         kfree(buf->pages);
232         kfree(buf);
233 }
234
235 static void *vb2_dma_sg_vaddr(void *buf_priv)
236 {
237         struct vb2_dma_sg_buf *buf = buf_priv;
238
239         BUG_ON(!buf);
240
241         if (!buf->vaddr)
242                 buf->vaddr = vm_map_ram(buf->pages,
243                                         buf->num_pages,
244                                         -1,
245                                         PAGE_KERNEL);
246
247         /* add offset in case userptr is not page-aligned */
248         return buf->vaddr + buf->offset;
249 }
250
251 static unsigned int vb2_dma_sg_num_users(void *buf_priv)
252 {
253         struct vb2_dma_sg_buf *buf = buf_priv;
254
255         return atomic_read(&buf->refcount);
256 }
257
258 static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
259 {
260         struct vb2_dma_sg_buf *buf = buf_priv;
261         unsigned long uaddr = vma->vm_start;
262         unsigned long usize = vma->vm_end - vma->vm_start;
263         int i = 0;
264
265         if (!buf) {
266                 printk(KERN_ERR "No memory to map\n");
267                 return -EINVAL;
268         }
269
270         do {
271                 int ret;
272
273                 ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
274                 if (ret) {
275                         printk(KERN_ERR "Remapping memory, error: %d\n", ret);
276                         return ret;
277                 }
278
279                 uaddr += PAGE_SIZE;
280                 usize -= PAGE_SIZE;
281         } while (usize > 0);
282
283
284         /*
285          * Use common vm_area operations to track buffer refcount.
286          */
287         vma->vm_private_data    = &buf->handler;
288         vma->vm_ops             = &vb2_common_vm_ops;
289
290         vma->vm_ops->open(vma);
291
292         return 0;
293 }
294
295 static void *vb2_dma_sg_cookie(void *buf_priv)
296 {
297         struct vb2_dma_sg_buf *buf = buf_priv;
298
299         return &buf->sg_table;
300 }
301
/* Memory-op vtable plugged into the videobuf2 core by drivers using dma-sg. */
const struct vb2_mem_ops vb2_dma_sg_memops = {
        .alloc          = vb2_dma_sg_alloc,
        .put            = vb2_dma_sg_put,
        .get_userptr    = vb2_dma_sg_get_userptr,
        .put_userptr    = vb2_dma_sg_put_userptr,
        .vaddr          = vb2_dma_sg_vaddr,
        .mmap           = vb2_dma_sg_mmap,
        .num_users      = vb2_dma_sg_num_users,
        .cookie         = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");