1 /**
2  * \file drm_bufs.c
3  * Generic buffer template
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8
9 /*
10  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11  *
12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  */
35
36 #include <linux/vmalloc.h>
37 #include "drmP.h"
38
39 unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
40 {
41         return pci_resource_start(dev->pdev, resource);
42 }
43 EXPORT_SYMBOL(drm_get_resource_start);
44
45 unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
46 {
47         return pci_resource_len(dev->pdev, resource);
48 }
49
50 EXPORT_SYMBOL(drm_get_resource_len);
51
52 static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
53                                              drm_local_map_t *map)
54 {
55         struct list_head *list;
56
57         list_for_each(list, &dev->maplist->head) {
58                 drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
59                 if (entry->map && map->type == entry->map->type &&
60                     entry->map->offset == map->offset) {
61                         return entry;
62                 }
63         }
64
65         return NULL;
66 }
67
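/*
 * Pick the 32-bit handle (hash key) that userspace will later pass back
 * through mmap().  If the kernel-side token already fits in 32 bits it is
 * used directly (shifted down by PAGE_SHIFT); otherwise, or if that slot is
 * already taken, a free key is chosen from the range based at
 * DRM_MAP_HASH_OFFSET so the handle cannot collide with a real map offset.
 */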
68 static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
69                           unsigned long user_token, int hashed_handle)
70 {
71         int use_hashed_handle;
72 #if (BITS_PER_LONG == 64)
73         use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
74 #elif (BITS_PER_LONG == 32)
75         use_hashed_handle = hashed_handle;
76 #else
77 #error Unsupported long size. Neither 64 nor 32 bits.
78 #endif
79
80         if (!use_hashed_handle) {
81                 int ret;
82                 hash->key = user_token >> PAGE_SHIFT;
83                 ret = drm_ht_insert_item(&dev->map_hash, hash);
84                 if (ret != -EINVAL)
85                         return ret;
86         }
87         return drm_ht_just_insert_please(&dev->map_hash, hash,
88                                          user_token, 32 - PAGE_SHIFT - 3,
89                                          0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
90 }
91
92 /**
93  * Ioctl to specify a range of memory that is available for mapping by a non-root process.
94  *
95  * \param inode device inode.
96  * \param filp file pointer.
97  * \param cmd command.
98  * \param arg pointer to a drm_map structure.
99  * \return zero on success or a negative value on error.
100  *
101  * Adjusts the memory offset to its absolute value according to the mapping
102  * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
103  * applicable, if supported by the kernel.
104  */
105 static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
106                            unsigned int size, drm_map_type_t type,
107                            drm_map_flags_t flags, drm_map_list_t ** maplist)
108 {
109         drm_map_t *map;
110         drm_map_list_t *list;
111         drm_dma_handle_t *dmah;
112         unsigned long user_token;
113         int ret;
114
115         map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
116         if (!map)
117                 return -ENOMEM;
118
119         map->offset = offset;
120         map->size = size;
121         map->flags = flags;
122         map->type = type;
123
124         /* Only allow shared memory to be removable since we only keep enough
125          * bookkeeping information about shared memory to allow for removal
126          * when processes fork.
127          */
128         if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
129                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
130                 return -EINVAL;
131         }
132         DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
133                   map->offset, map->size, map->type);
134         if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
135                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
136                 return -EINVAL;
137         }
138         map->mtrr = -1;
139         map->handle = NULL;
140
141         switch (map->type) {
142         case _DRM_REGISTERS:
143         case _DRM_FRAME_BUFFER:
144 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
145                 if (map->offset + (map->size-1) < map->offset ||
146                     map->offset < virt_to_phys(high_memory)) {
147                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
148                         return -EINVAL;
149                 }
150 #endif
151 #ifdef __alpha__
152                 map->offset += dev->hose->mem_space->start;
153 #endif
154                 /* Some drivers preinitialize some maps, without the X Server
155                  * needing to be aware of it.  Therefore, we just return success
156                  * when the server tries to create a duplicate map.
157                  */
158                 list = drm_find_matching_map(dev, map);
159                 if (list != NULL) {
160                         if (list->map->size != map->size) {
161                                 DRM_DEBUG("Matching maps of type %d with "
162                                           "mismatched sizes, (%ld vs %ld)\n",
163                                           map->type, map->size,
164                                           list->map->size);
165                                 list->map->size = map->size;
166                         }
167
168                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
169                         *maplist = list;
170                         return 0;
171                 }
172
173                 if (drm_core_has_MTRR(dev)) {
174                         if (map->type == _DRM_FRAME_BUFFER ||
175                             (map->flags & _DRM_WRITE_COMBINING)) {
176                                 map->mtrr = mtrr_add(map->offset, map->size,
177                                                      MTRR_TYPE_WRCOMB, 1);
178                         }
179                 }
180                 if (map->type == _DRM_REGISTERS)
181                         map->handle = ioremap(map->offset, map->size);
182                 break;
183
184         case _DRM_SHM:
185                 map->handle = vmalloc_user(map->size);
186                 DRM_DEBUG("%lu %d %p\n",
187                           map->size, drm_order(map->size), map->handle);
188                 if (!map->handle) {
189                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
190                         return -ENOMEM;
191                 }
192                 map->offset = (unsigned long)map->handle;
193                 if (map->flags & _DRM_CONTAINS_LOCK) {
194                         /* Prevent a 2nd X Server from creating a 2nd lock */
195                         if (dev->lock.hw_lock != NULL) {
196                                 vfree(map->handle);
197                                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
198                                 return -EBUSY;
199                         }
200                         dev->sigdata.lock = dev->lock.hw_lock = map->handle;    /* Pointer to lock */
201                 }
202                 break;
203         case _DRM_AGP:
204                 if (drm_core_has_AGP(dev)) {
205 #ifdef __alpha__
206                         map->offset += dev->hose->mem_space->start;
207 #endif
208                         map->offset += dev->agp->base;
209                         map->mtrr = dev->agp->agp_mtrr; /* for getmap */
210                 }
211                 break;
212         case _DRM_SCATTER_GATHER:
213                 if (!dev->sg) {
214                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
215                         return -EINVAL;
216                 }
217                 map->offset += (unsigned long)dev->sg->virtual;
218                 break;
219         case _DRM_CONSISTENT:
220                 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
221                  * As we're limiting the address to 2^32-1 (or less),
222                  * casting it down to 32 bits is no problem, but we
223                  * need to point to a 64bit variable first. */
224                 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
225                 if (!dmah) {
226                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
227                         return -ENOMEM;
228                 }
229                 map->handle = dmah->vaddr;
230                 map->offset = (unsigned long)dmah->busaddr;
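                /* Only the drm_dma_handle_t wrapper is freed here; the DMA
                 * memory itself stays allocated.  drm_rmmap_locked() later
                 * rebuilds a handle from map->handle, map->offset and
                 * map->size and hands it to __drm_pci_free().
                 */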
231                 kfree(dmah);
232                 break;
233         default:
234                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
235                 return -EINVAL;
236         }
237
238         list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
239         if (!list) {
240                 if (map->type == _DRM_REGISTERS)
241                         iounmap(map->handle);
242                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
243                 return -ENOMEM;
244         }
245         memset(list, 0, sizeof(*list));
246         list->map = map;
247
248         mutex_lock(&dev->struct_mutex);
249         list_add(&list->head, &dev->maplist->head);
250
251         /* Assign a 32-bit handle */
252         /* We do it here so that dev->struct_mutex protects the increment */
253         user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
254                 map->offset;
255         ret = drm_map_handle(dev, &list->hash, user_token, 0);
256         if (ret) {
257                 if (map->type == _DRM_REGISTERS)
258                         iounmap(map->handle);
259                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
260                 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
261                 mutex_unlock(&dev->struct_mutex);
262                 return ret;
263         }
264
265         list->user_token = list->hash.key << PAGE_SHIFT;
266         mutex_unlock(&dev->struct_mutex);
267
268         *maplist = list;
269         return 0;
270 }
271
272 int drm_addmap(drm_device_t * dev, unsigned int offset,
273                unsigned int size, drm_map_type_t type,
274                drm_map_flags_t flags, drm_local_map_t ** map_ptr)
275 {
276         drm_map_list_t *list;
277         int rc;
278
279         rc = drm_addmap_core(dev, offset, size, type, flags, &list);
280         if (!rc)
281                 *map_ptr = list->map;
282         return rc;
283 }
284
285 EXPORT_SYMBOL(drm_addmap);
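/*
 * Example (an illustrative sketch, not lifted from any particular driver):
 * during load a driver can use drm_addmap() to describe its MMIO register
 * BAR.  The PCI resource index 0 and the absence of flags are assumptions
 * made for this sketch.
 *
 *	drm_local_map_t *regs;
 *	int ret;
 *
 *	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *			 drm_get_resource_len(dev, 0),
 *			 _DRM_REGISTERS, 0, &regs);
 *	if (ret != 0)
 *		return ret;
 *
 * On success regs->handle holds the ioremap()ed kernel mapping of the
 * registers.
 */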
286
287 int drm_addmap_ioctl(struct inode *inode, struct file *filp,
288                      unsigned int cmd, unsigned long arg)
289 {
290         drm_file_t *priv = filp->private_data;
291         drm_device_t *dev = priv->head->dev;
292         drm_map_t map;
293         drm_map_list_t *maplist;
294         drm_map_t __user *argp = (void __user *)arg;
295         int err;
296
297         if (!(filp->f_mode & 3))
298                 return -EACCES; /* Require read/write */
299
300         if (copy_from_user(&map, argp, sizeof(map))) {
301                 return -EFAULT;
302         }
303
304         if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
305                 return -EPERM;
306
307         err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
308                               &maplist);
309
310         if (err)
311                 return err;
312
313         if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
314                 return -EFAULT;
315
316         /* Avoid a warning on 64-bit; this cast isn't pretty, but the API is already set in stone, so it's too late to change it. */
317         if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
318                 return -EFAULT;
319         return 0;
320 }
321
322 /**
323  * Remove a map from the map list and deallocate its resources if the mapping
324  * isn't in use.
325  *
326  * \param inode device inode.
327  * \param filp file pointer.
328  * \param cmd command.
329  * \param arg pointer to a drm_map_t structure.
330  * \return zero on success or a negative value on error.
331  *
332  * Searches for the map on drm_device::maplist, removes it from the list,
333  * checks whether it is still being used, and frees any associated resources
334  * (such as MTRRs) if it is not.
335  *
336  * \sa drm_addmap
337  */
338 int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
339 {
340         struct list_head *list;
341         drm_map_list_t *r_list = NULL;
342         drm_dma_handle_t dmah;
343
344         /* Find the list entry for the map and remove it */
345         list_for_each(list, &dev->maplist->head) {
346                 r_list = list_entry(list, drm_map_list_t, head);
347
348                 if (r_list->map == map) {
349                         list_del(list);
350                         drm_ht_remove_key(&dev->map_hash,
351                                           r_list->user_token >> PAGE_SHIFT);
352                         drm_free(list, sizeof(*list), DRM_MEM_MAPS);
353                         break;
354                 }
355         }
356
357         /* List has wrapped around to the head pointer, or it's empty and we
358          * didn't find anything.
359          */
360         if (list == (&dev->maplist->head)) {
361                 return -EINVAL;
362         }
363
364         switch (map->type) {
365         case _DRM_REGISTERS:
366                 iounmap(map->handle);
367                 /* FALLTHROUGH */
368         case _DRM_FRAME_BUFFER:
369                 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
370                         int retcode;
371                         retcode = mtrr_del(map->mtrr, map->offset, map->size);
372                         DRM_DEBUG("mtrr_del=%d\n", retcode);
373                 }
374                 break;
375         case _DRM_SHM:
376                 vfree(map->handle);
377                 break;
378         case _DRM_AGP:
379         case _DRM_SCATTER_GATHER:
380                 break;
381         case _DRM_CONSISTENT:
382                 dmah.vaddr = map->handle;
383                 dmah.busaddr = map->offset;
384                 dmah.size = map->size;
385                 __drm_pci_free(dev, &dmah);
386                 break;
387         }
388         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
389
390         return 0;
391 }
392
393 int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
394 {
395         int ret;
396
397         mutex_lock(&dev->struct_mutex);
398         ret = drm_rmmap_locked(dev, map);
399         mutex_unlock(&dev->struct_mutex);
400
401         return ret;
402 }
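/*
 * Continuing the sketch above: a map created with drm_addmap() can be torn
 * down explicitly with drm_rmmap(), which takes dev->struct_mutex itself.
 *
 *	if (regs) {
 *		drm_rmmap(dev, regs);
 *		regs = NULL;
 *	}
 */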
403
404 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
405  * the last close of the device, and this is necessary for cleanup when things
406  * exit uncleanly.  Therefore, having userland manually remove mappings seems
407  * like a pointless exercise since they're going away anyway.
408  *
409  * One use case might be after addmap is allowed for normal users for SHM and
410  * gets used by drivers that the server doesn't need to care about.  This seems
411  * unlikely.
412  */
413 int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
414                     unsigned int cmd, unsigned long arg)
415 {
416         drm_file_t *priv = filp->private_data;
417         drm_device_t *dev = priv->head->dev;
418         drm_map_t request;
419         drm_local_map_t *map = NULL;
420         struct list_head *list;
421         int ret;
422
423         if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
424                 return -EFAULT;
425         }
426
427         mutex_lock(&dev->struct_mutex);
428         list_for_each(list, &dev->maplist->head) {
429                 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
430
431                 if (r_list->map &&
432                     r_list->user_token == (unsigned long)request.handle &&
433                     r_list->map->flags & _DRM_REMOVABLE) {
434                         map = r_list->map;
435                         break;
436                 }
437         }
438
439         /* List has wrapped around to the head pointer, or it's empty and we
440          * didn't find anything.
441          */
442         if (list == (&dev->maplist->head)) {
443                 mutex_unlock(&dev->struct_mutex);
444                 return -EINVAL;
445         }
446
447         if (!map) {
448                 mutex_unlock(&dev->struct_mutex);
449                 return -EINVAL;
450         }
451
452         /* Register and framebuffer maps are permanent */
453         if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
454                 mutex_unlock(&dev->struct_mutex);
455                 return 0;
456         }
457
458         ret = drm_rmmap_locked(dev, map);
459
460         mutex_unlock(&dev->struct_mutex);
461
462         return ret;
463 }
464
465 /**
466  * Cleanup after an error on one of the addbufs() functions.
467  *
468  * \param dev DRM device.
469  * \param entry buffer entry where the error occurred.
470  *
471  * Frees any pages and buffers associated with the given entry.
472  */
473 static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
474 {
475         int i;
476
477         if (entry->seg_count) {
478                 for (i = 0; i < entry->seg_count; i++) {
479                         if (entry->seglist[i]) {
480                                 drm_pci_free(dev, entry->seglist[i]);
481                         }
482                 }
483                 drm_free(entry->seglist,
484                          entry->seg_count *
485                          sizeof(*entry->seglist), DRM_MEM_SEGS);
486
487                 entry->seg_count = 0;
488         }
489
490         if (entry->buf_count) {
491                 for (i = 0; i < entry->buf_count; i++) {
492                         if (entry->buflist[i].dev_private) {
493                                 drm_free(entry->buflist[i].dev_private,
494                                          entry->buflist[i].dev_priv_size,
495                                          DRM_MEM_BUFS);
496                         }
497                 }
498                 drm_free(entry->buflist,
499                          entry->buf_count *
500                          sizeof(*entry->buflist), DRM_MEM_BUFS);
501
502                 entry->buf_count = 0;
503         }
504 }
505
506 #if __OS_HAS_AGP
507 /**
508  * Add AGP buffers for DMA transfers.
509  *
510  * \param dev drm_device_t to which the buffers are to be added.
511  * \param request pointer to a drm_buf_desc_t describing the request.
512  * \return zero on success or a negative number on failure.
513  *
514  * After some sanity checks, creates a drm_buf structure for each buffer and
515  * reallocates the buffer list of the same size order to accommodate the new
516  * buffers.
517  */
518 int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
519 {
520         drm_device_dma_t *dma = dev->dma;
521         drm_buf_entry_t *entry;
522         drm_buf_t *buf;
523         unsigned long offset;
524         unsigned long agp_offset;
525         int count;
526         int order;
527         int size;
528         int alignment;
529         int page_order;
530         int total;
531         int byte_count;
532         int i;
533         drm_buf_t **temp_buflist;
534
535         if (!dma)
536                 return -EINVAL;
537
538         count = request->count;
539         order = drm_order(request->size);
540         size = 1 << order;
541
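        /* drm_order() rounds the requested size up to the next power of two,
         * so "size" is the per-buffer size actually handed out.  page_order
         * expresses that size as a page allocation order (clamped at zero),
         * and total is the same size rounded up to at least one whole page.
         */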
542         alignment = (request->flags & _DRM_PAGE_ALIGN)
543             ? PAGE_ALIGN(size) : size;
544         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
545         total = PAGE_SIZE << page_order;
546
547         byte_count = 0;
548         agp_offset = dev->agp->base + request->agp_start;
549
550         DRM_DEBUG("count:      %d\n", count);
551         DRM_DEBUG("order:      %d\n", order);
552         DRM_DEBUG("size:       %d\n", size);
553         DRM_DEBUG("agp_offset: %lx\n", agp_offset);
554         DRM_DEBUG("alignment:  %d\n", alignment);
555         DRM_DEBUG("page_order: %d\n", page_order);
556         DRM_DEBUG("total:      %d\n", total);
557
558         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
559                 return -EINVAL;
560         if (dev->queue_count)
561                 return -EBUSY;  /* Not while in use */
562
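        /* buf_use becomes non-zero once the buffers have been handed out to
         * clients (drm_infobufs/drm_mapbufs); after that no further
         * allocation is allowed.  buf_alloc marks an allocation in progress
         * so those paths back off with -EBUSY instead of racing with us.
         */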
563         spin_lock(&dev->count_lock);
564         if (dev->buf_use) {
565                 spin_unlock(&dev->count_lock);
566                 return -EBUSY;
567         }
568         atomic_inc(&dev->buf_alloc);
569         spin_unlock(&dev->count_lock);
570
571         mutex_lock(&dev->struct_mutex);
572         entry = &dma->bufs[order];
573         if (entry->buf_count) {
574                 mutex_unlock(&dev->struct_mutex);
575                 atomic_dec(&dev->buf_alloc);
576                 return -ENOMEM; /* May only call once for each order */
577         }
578
579         if (count < 0 || count > 4096) {
580                 mutex_unlock(&dev->struct_mutex);
581                 atomic_dec(&dev->buf_alloc);
582                 return -EINVAL;
583         }
584
585         entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
586                                    DRM_MEM_BUFS);
587         if (!entry->buflist) {
588                 mutex_unlock(&dev->struct_mutex);
589                 atomic_dec(&dev->buf_alloc);
590                 return -ENOMEM;
591         }
592         memset(entry->buflist, 0, count * sizeof(*entry->buflist));
593
594         entry->buf_size = size;
595         entry->page_order = page_order;
596
597         offset = 0;
598
599         while (entry->buf_count < count) {
600                 buf = &entry->buflist[entry->buf_count];
601                 buf->idx = dma->buf_count + entry->buf_count;
602                 buf->total = alignment;
603                 buf->order = order;
604                 buf->used = 0;
605
606                 buf->offset = (dma->byte_count + offset);
607                 buf->bus_address = agp_offset + offset;
608                 buf->address = (void *)(agp_offset + offset);
609                 buf->next = NULL;
610                 buf->waiting = 0;
611                 buf->pending = 0;
612                 init_waitqueue_head(&buf->dma_wait);
613                 buf->filp = NULL;
614
615                 buf->dev_priv_size = dev->driver->dev_priv_size;
616                 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
617                 if (!buf->dev_private) {
618                         /* Set count correctly so we free the proper amount. */
619                         entry->buf_count = count;
620                         drm_cleanup_buf_error(dev, entry);
621                         mutex_unlock(&dev->struct_mutex);
622                         atomic_dec(&dev->buf_alloc);
623                         return -ENOMEM;
624                 }
625                 memset(buf->dev_private, 0, buf->dev_priv_size);
626
627                 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
628
629                 offset += alignment;
630                 entry->buf_count++;
631                 byte_count += PAGE_SIZE << page_order;
632         }
633
634         DRM_DEBUG("byte_count: %d\n", byte_count);
635
636         temp_buflist = drm_realloc(dma->buflist,
637                                    dma->buf_count * sizeof(*dma->buflist),
638                                    (dma->buf_count + entry->buf_count)
639                                    * sizeof(*dma->buflist), DRM_MEM_BUFS);
640         if (!temp_buflist) {
641                 /* Free the entry because it isn't valid */
642                 drm_cleanup_buf_error(dev, entry);
643                 mutex_unlock(&dev->struct_mutex);
644                 atomic_dec(&dev->buf_alloc);
645                 return -ENOMEM;
646         }
647         dma->buflist = temp_buflist;
648
649         for (i = 0; i < entry->buf_count; i++) {
650                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
651         }
652
653         dma->buf_count += entry->buf_count;
654         dma->seg_count += entry->seg_count;
655         dma->page_count += byte_count >> PAGE_SHIFT;
656         dma->byte_count += byte_count;
657
658         DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
659         DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
660
661         mutex_unlock(&dev->struct_mutex);
662
663         request->count = entry->buf_count;
664         request->size = size;
665
666         dma->flags = _DRM_DMA_USE_AGP;
667
668         atomic_dec(&dev->buf_alloc);
669         return 0;
670 }
671 EXPORT_SYMBOL(drm_addbufs_agp);
672 #endif                          /* __OS_HAS_AGP */
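/*
 * Illustrative use of drm_addbufs_agp() from a driver's DMA setup path.  The
 * buffer count, the buffer size and the dev_priv field holding the offset
 * into the AGP aperture are assumptions made for this sketch.
 *
 *	drm_buf_desc_t req;
 *	int ret;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = 64;
 *	req.size = 65536;
 *	req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	req.agp_start = dev_priv->buf_offset;
 *	ret = drm_addbufs_agp(dev, &req);
 *
 * On success req.count and req.size are updated to the number and size of
 * the buffers that were actually allocated for this size order.
 */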
673
674 int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
675 {
676         drm_device_dma_t *dma = dev->dma;
677         int count;
678         int order;
679         int size;
680         int total;
681         int page_order;
682         drm_buf_entry_t *entry;
683         drm_dma_handle_t *dmah;
684         drm_buf_t *buf;
685         int alignment;
686         unsigned long offset;
687         int i;
688         int byte_count;
689         int page_count;
690         unsigned long *temp_pagelist;
691         drm_buf_t **temp_buflist;
692
693         if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
694                 return -EINVAL;
695
696         if (!dma)
697                 return -EINVAL;
698
699         if (!capable(CAP_SYS_ADMIN))
700                 return -EPERM;
701
702         count = request->count;
703         order = drm_order(request->size);
704         size = 1 << order;
705
706         DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
707                   request->count, request->size, size, order, dev->queue_count);
708
709         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
710                 return -EINVAL;
711         if (dev->queue_count)
712                 return -EBUSY;  /* Not while in use */
713
714         alignment = (request->flags & _DRM_PAGE_ALIGN)
715             ? PAGE_ALIGN(size) : size;
716         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
717         total = PAGE_SIZE << page_order;
718
719         spin_lock(&dev->count_lock);
720         if (dev->buf_use) {
721                 spin_unlock(&dev->count_lock);
722                 return -EBUSY;
723         }
724         atomic_inc(&dev->buf_alloc);
725         spin_unlock(&dev->count_lock);
726
727         mutex_lock(&dev->struct_mutex);
728         entry = &dma->bufs[order];
729         if (entry->buf_count) {
730                 mutex_unlock(&dev->struct_mutex);
731                 atomic_dec(&dev->buf_alloc);
732                 return -ENOMEM; /* May only call once for each order */
733         }
734
735         if (count < 0 || count > 4096) {
736                 mutex_unlock(&dev->struct_mutex);
737                 atomic_dec(&dev->buf_alloc);
738                 return -EINVAL;
739         }
740
741         entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
742                                    DRM_MEM_BUFS);
743         if (!entry->buflist) {
744                 mutex_unlock(&dev->struct_mutex);
745                 atomic_dec(&dev->buf_alloc);
746                 return -ENOMEM;
747         }
748         memset(entry->buflist, 0, count * sizeof(*entry->buflist));
749
750         entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
751                                    DRM_MEM_SEGS);
752         if (!entry->seglist) {
753                 drm_free(entry->buflist,
754                          count * sizeof(*entry->buflist), DRM_MEM_BUFS);
755                 mutex_unlock(&dev->struct_mutex);
756                 atomic_dec(&dev->buf_alloc);
757                 return -ENOMEM;
758         }
759         memset(entry->seglist, 0, count * sizeof(*entry->seglist));
760
761         /* Keep the original pagelist until we know all the allocations
762          * have succeeded
763          */
764         temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
765                                   * sizeof(*dma->pagelist), DRM_MEM_PAGES);
766         if (!temp_pagelist) {
767                 drm_free(entry->buflist,
768                          count * sizeof(*entry->buflist), DRM_MEM_BUFS);
769                 drm_free(entry->seglist,
770                          count * sizeof(*entry->seglist), DRM_MEM_SEGS);
771                 mutex_unlock(&dev->struct_mutex);
772                 atomic_dec(&dev->buf_alloc);
773                 return -ENOMEM;
774         }
775         memcpy(temp_pagelist,
776                dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
777         DRM_DEBUG("pagelist: %d entries\n",
778                   dma->page_count + (count << page_order));
779
780         entry->buf_size = size;
781         entry->page_order = page_order;
782         byte_count = 0;
783         page_count = 0;
784
785         while (entry->buf_count < count) {
786                 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000,
787                                      0xfffffffful);
789                 if (!dmah) {
790                         /* Set count correctly so we free the proper amount. */
791                         entry->buf_count = count;
792                         entry->seg_count = count;
793                         drm_cleanup_buf_error(dev, entry);
794                         drm_free(temp_pagelist,
795                                  (dma->page_count + (count << page_order))
796                                  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
797                         mutex_unlock(&dev->struct_mutex);
798                         atomic_dec(&dev->buf_alloc);
799                         return -ENOMEM;
800                 }
801                 entry->seglist[entry->seg_count++] = dmah;
802                 for (i = 0; i < (1 << page_order); i++) {
803                         DRM_DEBUG("page %d @ 0x%08lx\n",
804                                   dma->page_count + page_count,
805                                   (unsigned long)dmah->vaddr + PAGE_SIZE * i);
806                         temp_pagelist[dma->page_count + page_count++]
807                                 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
808                 }
809                 for (offset = 0;
810                      offset + size <= total && entry->buf_count < count;
811                      offset += alignment, ++entry->buf_count) {
812                         buf = &entry->buflist[entry->buf_count];
813                         buf->idx = dma->buf_count + entry->buf_count;
814                         buf->total = alignment;
815                         buf->order = order;
816                         buf->used = 0;
817                         buf->offset = (dma->byte_count + byte_count + offset);
818                         buf->address = (void *)(dmah->vaddr + offset);
819                         buf->bus_address = dmah->busaddr + offset;
820                         buf->next = NULL;
821                         buf->waiting = 0;
822                         buf->pending = 0;
823                         init_waitqueue_head(&buf->dma_wait);
824                         buf->filp = NULL;
825
826                         buf->dev_priv_size = dev->driver->dev_priv_size;
827                         buf->dev_private = drm_alloc(buf->dev_priv_size,
828                                                      DRM_MEM_BUFS);
829                         if (!buf->dev_private) {
830                                 /* Set count correctly so we free the proper amount. */
831                                 entry->buf_count = count;
832                                 entry->seg_count = count;
833                                 drm_cleanup_buf_error(dev, entry);
834                                 drm_free(temp_pagelist,
835                                          (dma->page_count +
836                                           (count << page_order))
837                                          * sizeof(*dma->pagelist),
838                                          DRM_MEM_PAGES);
839                                 mutex_unlock(&dev->struct_mutex);
840                                 atomic_dec(&dev->buf_alloc);
841                                 return -ENOMEM;
842                         }
843                         memset(buf->dev_private, 0, buf->dev_priv_size);
844
845                         DRM_DEBUG("buffer %d @ %p\n",
846                                   entry->buf_count, buf->address);
847                 }
848                 byte_count += PAGE_SIZE << page_order;
849         }
850
851         temp_buflist = drm_realloc(dma->buflist,
852                                    dma->buf_count * sizeof(*dma->buflist),
853                                    (dma->buf_count + entry->buf_count)
854                                    * sizeof(*dma->buflist), DRM_MEM_BUFS);
855         if (!temp_buflist) {
856                 /* Free the entry because it isn't valid */
857                 drm_cleanup_buf_error(dev, entry);
858                 drm_free(temp_pagelist,
859                          (dma->page_count + (count << page_order))
860                          * sizeof(*dma->pagelist), DRM_MEM_PAGES);
861                 mutex_unlock(&dev->struct_mutex);
862                 atomic_dec(&dev->buf_alloc);
863                 return -ENOMEM;
864         }
865         dma->buflist = temp_buflist;
866
867         for (i = 0; i < entry->buf_count; i++) {
868                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
869         }
870
871         /* No allocations failed, so now we can replace the original pagelist
872          * with the new one.
873          */
874         if (dma->page_count) {
875                 drm_free(dma->pagelist,
876                          dma->page_count * sizeof(*dma->pagelist),
877                          DRM_MEM_PAGES);
878         }
879         dma->pagelist = temp_pagelist;
880
881         dma->buf_count += entry->buf_count;
882         dma->seg_count += entry->seg_count;
883         dma->page_count += entry->seg_count << page_order;
884         dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
885
886         mutex_unlock(&dev->struct_mutex);
887
888         request->count = entry->buf_count;
889         request->size = size;
890
891         if (request->flags & _DRM_PCI_BUFFER_RO)
892                 dma->flags = _DRM_DMA_USE_PCI_RO;
893
894         atomic_dec(&dev->buf_alloc);
895         return 0;
896
897 }
898 EXPORT_SYMBOL(drm_addbufs_pci);
899
900 static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
901 {
902         drm_device_dma_t *dma = dev->dma;
903         drm_buf_entry_t *entry;
904         drm_buf_t *buf;
905         unsigned long offset;
906         unsigned long agp_offset;
907         int count;
908         int order;
909         int size;
910         int alignment;
911         int page_order;
912         int total;
913         int byte_count;
914         int i;
915         drm_buf_t **temp_buflist;
916
917         if (!drm_core_check_feature(dev, DRIVER_SG))
918                 return -EINVAL;
919
920         if (!dma)
921                 return -EINVAL;
922
923         if (!capable(CAP_SYS_ADMIN))
924                 return -EPERM;
925
926         count = request->count;
927         order = drm_order(request->size);
928         size = 1 << order;
929
930         alignment = (request->flags & _DRM_PAGE_ALIGN)
931             ? PAGE_ALIGN(size) : size;
932         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
933         total = PAGE_SIZE << page_order;
934
935         byte_count = 0;
936         agp_offset = request->agp_start;
937
938         DRM_DEBUG("count:      %d\n", count);
939         DRM_DEBUG("order:      %d\n", order);
940         DRM_DEBUG("size:       %d\n", size);
941         DRM_DEBUG("agp_offset: %lu\n", agp_offset);
942         DRM_DEBUG("alignment:  %d\n", alignment);
943         DRM_DEBUG("page_order: %d\n", page_order);
944         DRM_DEBUG("total:      %d\n", total);
945
946         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
947                 return -EINVAL;
948         if (dev->queue_count)
949                 return -EBUSY;  /* Not while in use */
950
951         spin_lock(&dev->count_lock);
952         if (dev->buf_use) {
953                 spin_unlock(&dev->count_lock);
954                 return -EBUSY;
955         }
956         atomic_inc(&dev->buf_alloc);
957         spin_unlock(&dev->count_lock);
958
959         mutex_lock(&dev->struct_mutex);
960         entry = &dma->bufs[order];
961         if (entry->buf_count) {
962                 mutex_unlock(&dev->struct_mutex);
963                 atomic_dec(&dev->buf_alloc);
964                 return -ENOMEM; /* May only call once for each order */
965         }
966
967         if (count < 0 || count > 4096) {
968                 mutex_unlock(&dev->struct_mutex);
969                 atomic_dec(&dev->buf_alloc);
970                 return -EINVAL;
971         }
972
973         entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
974                                    DRM_MEM_BUFS);
975         if (!entry->buflist) {
976                 mutex_unlock(&dev->struct_mutex);
977                 atomic_dec(&dev->buf_alloc);
978                 return -ENOMEM;
979         }
980         memset(entry->buflist, 0, count * sizeof(*entry->buflist));
981
982         entry->buf_size = size;
983         entry->page_order = page_order;
984
985         offset = 0;
986
987         while (entry->buf_count < count) {
988                 buf = &entry->buflist[entry->buf_count];
989                 buf->idx = dma->buf_count + entry->buf_count;
990                 buf->total = alignment;
991                 buf->order = order;
992                 buf->used = 0;
993
994                 buf->offset = (dma->byte_count + offset);
995                 buf->bus_address = agp_offset + offset;
996                 buf->address = (void *)(agp_offset + offset
997                                         + (unsigned long)dev->sg->virtual);
998                 buf->next = NULL;
999                 buf->waiting = 0;
1000                 buf->pending = 0;
1001                 init_waitqueue_head(&buf->dma_wait);
1002                 buf->filp = NULL;
1003
1004                 buf->dev_priv_size = dev->driver->dev_priv_size;
1005                 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1006                 if (!buf->dev_private) {
1007                         /* Set count correctly so we free the proper amount. */
1008                         entry->buf_count = count;
1009                         drm_cleanup_buf_error(dev, entry);
1010                         mutex_unlock(&dev->struct_mutex);
1011                         atomic_dec(&dev->buf_alloc);
1012                         return -ENOMEM;
1013                 }
1014
1015                 memset(buf->dev_private, 0, buf->dev_priv_size);
1016
1017                 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1018
1019                 offset += alignment;
1020                 entry->buf_count++;
1021                 byte_count += PAGE_SIZE << page_order;
1022         }
1023
1024         DRM_DEBUG("byte_count: %d\n", byte_count);
1025
1026         temp_buflist = drm_realloc(dma->buflist,
1027                                    dma->buf_count * sizeof(*dma->buflist),
1028                                    (dma->buf_count + entry->buf_count)
1029                                    * sizeof(*dma->buflist), DRM_MEM_BUFS);
1030         if (!temp_buflist) {
1031                 /* Free the entry because it isn't valid */
1032                 drm_cleanup_buf_error(dev, entry);
1033                 mutex_unlock(&dev->struct_mutex);
1034                 atomic_dec(&dev->buf_alloc);
1035                 return -ENOMEM;
1036         }
1037         dma->buflist = temp_buflist;
1038
1039         for (i = 0; i < entry->buf_count; i++) {
1040                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1041         }
1042
1043         dma->buf_count += entry->buf_count;
1044         dma->seg_count += entry->seg_count;
1045         dma->page_count += byte_count >> PAGE_SHIFT;
1046         dma->byte_count += byte_count;
1047
1048         DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1049         DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1050
1051         mutex_unlock(&dev->struct_mutex);
1052
1053         request->count = entry->buf_count;
1054         request->size = size;
1055
1056         dma->flags = _DRM_DMA_USE_SG;
1057
1058         atomic_dec(&dev->buf_alloc);
1059         return 0;
1060 }
1061
1062 static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1063 {
1064         drm_device_dma_t *dma = dev->dma;
1065         drm_buf_entry_t *entry;
1066         drm_buf_t *buf;
1067         unsigned long offset;
1068         unsigned long agp_offset;
1069         int count;
1070         int order;
1071         int size;
1072         int alignment;
1073         int page_order;
1074         int total;
1075         int byte_count;
1076         int i;
1077         drm_buf_t **temp_buflist;
1078
1079         if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1080                 return -EINVAL;
1081
1082         if (!dma)
1083                 return -EINVAL;
1084
1085         if (!capable(CAP_SYS_ADMIN))
1086                 return -EPERM;
1087
1088         count = request->count;
1089         order = drm_order(request->size);
1090         size = 1 << order;
1091
1092         alignment = (request->flags & _DRM_PAGE_ALIGN)
1093             ? PAGE_ALIGN(size) : size;
1094         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1095         total = PAGE_SIZE << page_order;
1096
1097         byte_count = 0;
1098         agp_offset = request->agp_start;
1099
1100         DRM_DEBUG("count:      %d\n", count);
1101         DRM_DEBUG("order:      %d\n", order);
1102         DRM_DEBUG("size:       %d\n", size);
1103         DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1104         DRM_DEBUG("alignment:  %d\n", alignment);
1105         DRM_DEBUG("page_order: %d\n", page_order);
1106         DRM_DEBUG("total:      %d\n", total);
1107
1108         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1109                 return -EINVAL;
1110         if (dev->queue_count)
1111                 return -EBUSY;  /* Not while in use */
1112
1113         spin_lock(&dev->count_lock);
1114         if (dev->buf_use) {
1115                 spin_unlock(&dev->count_lock);
1116                 return -EBUSY;
1117         }
1118         atomic_inc(&dev->buf_alloc);
1119         spin_unlock(&dev->count_lock);
1120
1121         mutex_lock(&dev->struct_mutex);
1122         entry = &dma->bufs[order];
1123         if (entry->buf_count) {
1124                 mutex_unlock(&dev->struct_mutex);
1125                 atomic_dec(&dev->buf_alloc);
1126                 return -ENOMEM; /* May only call once for each order */
1127         }
1128
1129         if (count < 0 || count > 4096) {
1130                 mutex_unlock(&dev->struct_mutex);
1131                 atomic_dec(&dev->buf_alloc);
1132                 return -EINVAL;
1133         }
1134
1135         entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1136                                    DRM_MEM_BUFS);
1137         if (!entry->buflist) {
1138                 mutex_unlock(&dev->struct_mutex);
1139                 atomic_dec(&dev->buf_alloc);
1140                 return -ENOMEM;
1141         }
1142         memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1143
1144         entry->buf_size = size;
1145         entry->page_order = page_order;
1146
1147         offset = 0;
1148
1149         while (entry->buf_count < count) {
1150                 buf = &entry->buflist[entry->buf_count];
1151                 buf->idx = dma->buf_count + entry->buf_count;
1152                 buf->total = alignment;
1153                 buf->order = order;
1154                 buf->used = 0;
1155
1156                 buf->offset = (dma->byte_count + offset);
1157                 buf->bus_address = agp_offset + offset;
1158                 buf->address = (void *)(agp_offset + offset);
1159                 buf->next = NULL;
1160                 buf->waiting = 0;
1161                 buf->pending = 0;
1162                 init_waitqueue_head(&buf->dma_wait);
1163                 buf->filp = NULL;
1164
1165                 buf->dev_priv_size = dev->driver->dev_priv_size;
1166                 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1167                 if (!buf->dev_private) {
1168                         /* Set count correctly so we free the proper amount. */
1169                         entry->buf_count = count;
1170                         drm_cleanup_buf_error(dev, entry);
1171                         mutex_unlock(&dev->struct_mutex);
1172                         atomic_dec(&dev->buf_alloc);
1173                         return -ENOMEM;
1174                 }
1175                 memset(buf->dev_private, 0, buf->dev_priv_size);
1176
1177                 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1178
1179                 offset += alignment;
1180                 entry->buf_count++;
1181                 byte_count += PAGE_SIZE << page_order;
1182         }
1183
1184         DRM_DEBUG("byte_count: %d\n", byte_count);
1185
1186         temp_buflist = drm_realloc(dma->buflist,
1187                                    dma->buf_count * sizeof(*dma->buflist),
1188                                    (dma->buf_count + entry->buf_count)
1189                                    * sizeof(*dma->buflist), DRM_MEM_BUFS);
1190         if (!temp_buflist) {
1191                 /* Free the entry because it isn't valid */
1192                 drm_cleanup_buf_error(dev, entry);
1193                 mutex_unlock(&dev->struct_mutex);
1194                 atomic_dec(&dev->buf_alloc);
1195                 return -ENOMEM;
1196         }
1197         dma->buflist = temp_buflist;
1198
1199         for (i = 0; i < entry->buf_count; i++) {
1200                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1201         }
1202
1203         dma->buf_count += entry->buf_count;
1204         dma->seg_count += entry->seg_count;
1205         dma->page_count += byte_count >> PAGE_SHIFT;
1206         dma->byte_count += byte_count;
1207
1208         DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1209         DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1210
1211         mutex_unlock(&dev->struct_mutex);
1212
1213         request->count = entry->buf_count;
1214         request->size = size;
1215
1216         dma->flags = _DRM_DMA_USE_FB;
1217
1218         atomic_dec(&dev->buf_alloc);
1219         return 0;
1220 }
1221
1222
1223 /**
1224  * Add buffers for DMA transfers (ioctl).
1225  *
1226  * \param inode device inode.
1227  * \param filp file pointer.
1228  * \param cmd command.
1229  * \param arg pointer to a drm_buf_desc_t request.
1230  * \return zero on success or a negative number on failure.
1231  *
1232  * According to the memory type specified in drm_buf_desc::flags and the
1233  * build options, it dispatches the call to drm_addbufs_agp(),
1234  * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
1235  * scatter-gather, framebuffer or consistent PCI memory respectively.
1236  */
1237 int drm_addbufs(struct inode *inode, struct file *filp,
1238                 unsigned int cmd, unsigned long arg)
1239 {
1240         drm_buf_desc_t request;
1241         drm_file_t *priv = filp->private_data;
1242         drm_device_t *dev = priv->head->dev;
1243         int ret;
1244
1245         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1246                 return -EINVAL;
1247
1248         if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
1249                            sizeof(request)))
1250                 return -EFAULT;
1251
1252 #if __OS_HAS_AGP
1253         if (request.flags & _DRM_AGP_BUFFER)
1254                 ret = drm_addbufs_agp(dev, &request);
1255         else
1256 #endif
1257         if (request.flags & _DRM_SG_BUFFER)
1258                 ret = drm_addbufs_sg(dev, &request);
1259         else if (request.flags & _DRM_FB_BUFFER)
1260                 ret = drm_addbufs_fb(dev, &request);
1261         else
1262                 ret = drm_addbufs_pci(dev, &request);
1263
1264         if (ret == 0) {
1265                 if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
1266                         ret = -EFAULT;
1267                 }
1268         }
1269         return ret;
1270 }
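/*
 * From userspace the same request goes through the ADD_BUFS ioctl.  A minimal
 * sketch, assuming an already-open DRM file descriptor fd; the count and size
 * are arbitrary, and leaving the AGP/SG/FB flags clear selects the PCI path:
 *
 *	drm_buf_desc_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = 32;
 *	req.size = 16384;
 *	req.flags = _DRM_PAGE_ALIGN;
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) == 0)
 *		printf("got %d buffers of %d bytes\n", req.count, req.size);
 */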
1271
1272 /**
1273  * Get information about the buffer mappings.
1274  *
1275  * This was originally meant for debugging purposes, or for use by a sophisticated
1276  * client library to determine how best to use the available buffers (e.g.,
1277  * large buffers can be used for image transfer).
1278  *
1279  * \param inode device inode.
1280  * \param filp file pointer.
1281  * \param cmd command.
1282  * \param arg pointer to a drm_buf_info structure.
1283  * \return zero on success or a negative number on failure.
1284  *
1285  * Increments drm_device::buf_use while holding the drm_device::count_lock
1286  * lock, preventing allocation of more buffers after this call. Information
1287  * about each requested buffer is then copied into user space.
1288  */
1289 int drm_infobufs(struct inode *inode, struct file *filp,
1290                  unsigned int cmd, unsigned long arg)
1291 {
1292         drm_file_t *priv = filp->private_data;
1293         drm_device_t *dev = priv->head->dev;
1294         drm_device_dma_t *dma = dev->dma;
1295         drm_buf_info_t request;
1296         drm_buf_info_t __user *argp = (void __user *)arg;
1297         int i;
1298         int count;
1299
1300         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1301                 return -EINVAL;
1302
1303         if (!dma)
1304                 return -EINVAL;
1305
1306         spin_lock(&dev->count_lock);
1307         if (atomic_read(&dev->buf_alloc)) {
1308                 spin_unlock(&dev->count_lock);
1309                 return -EBUSY;
1310         }
1311         ++dev->buf_use;         /* Can't allocate more after this call */
1312         spin_unlock(&dev->count_lock);
1313
1314         if (copy_from_user(&request, argp, sizeof(request)))
1315                 return -EFAULT;
1316
1317         for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1318                 if (dma->bufs[i].buf_count)
1319                         ++count;
1320         }
1321
1322         DRM_DEBUG("count = %d\n", count);
1323
1324         if (request.count >= count) {
1325                 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1326                         if (dma->bufs[i].buf_count) {
1327                                 drm_buf_desc_t __user *to =
1328                                     &request.list[count];
1329                                 drm_buf_entry_t *from = &dma->bufs[i];
1330                                 drm_freelist_t *list = &dma->bufs[i].freelist;
1331                                 if (copy_to_user(&to->count,
1332                                                  &from->buf_count,
1333                                                  sizeof(from->buf_count)) ||
1334                                     copy_to_user(&to->size,
1335                                                  &from->buf_size,
1336                                                  sizeof(from->buf_size)) ||
1337                                     copy_to_user(&to->low_mark,
1338                                                  &list->low_mark,
1339                                                  sizeof(list->low_mark)) ||
1340                                     copy_to_user(&to->high_mark,
1341                                                  &list->high_mark,
1342                                                  sizeof(list->high_mark)))
1343                                         return -EFAULT;
1344
1345                                 DRM_DEBUG("%d %d %d %d %d\n",
1346                                           i,
1347                                           dma->bufs[i].buf_count,
1348                                           dma->bufs[i].buf_size,
1349                                           dma->bufs[i].freelist.low_mark,
1350                                           dma->bufs[i].freelist.high_mark);
1351                                 ++count;
1352                         }
1353                 }
1354         }
1355         request.count = count;
1356
1357         if (copy_to_user(argp, &request, sizeof(request)))
1358                 return -EFAULT;
1359
1360         return 0;
1361 }
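/*
 * Typical two-pass use from userspace (a sketch, assuming an open DRM file
 * descriptor fd): call once with an empty list to learn how many size classes
 * exist, then again with a large enough list to fetch one drm_buf_desc_t per
 * class.
 *
 *	drm_buf_info_t info;
 *
 *	memset(&info, 0, sizeof(info));
 *	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) != 0)
 *		return -1;
 *	info.list = calloc(info.count, sizeof(*info.list));
 *	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) != 0)
 *		return -1;
 */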
1362
1363 /**
1364  * Specifies a low and high water mark for buffer allocation.
1365  *
1366  * \param inode device inode.
1367  * \param filp file pointer.
1368  * \param cmd command.
1369  * \param arg a pointer to a drm_buf_desc structure.
1370  * \return zero on success or a negative number on failure.
1371  *
1372  * Verifies that the size order lies between the admissible orders and updates
1373  * the low and high water marks of the respective drm_device_dma::bufs entry.
1374  *
1375  * \note This ioctl is deprecated and mostly never used.
1376  */
1377 int drm_markbufs(struct inode *inode, struct file *filp,
1378                  unsigned int cmd, unsigned long arg)
1379 {
1380         drm_file_t *priv = filp->private_data;
1381         drm_device_t *dev = priv->head->dev;
1382         drm_device_dma_t *dma = dev->dma;
1383         drm_buf_desc_t request;
1384         int order;
1385         drm_buf_entry_t *entry;
1386
1387         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1388                 return -EINVAL;
1389
1390         if (!dma)
1391                 return -EINVAL;
1392
1393         if (copy_from_user(&request,
1394                            (drm_buf_desc_t __user *) arg, sizeof(request)))
1395                 return -EFAULT;
1396
1397         DRM_DEBUG("%d, %d, %d\n",
1398                   request.size, request.low_mark, request.high_mark);
1399         order = drm_order(request.size);
1400         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1401                 return -EINVAL;
1402         entry = &dma->bufs[order];
1403
1404         if (request.low_mark < 0 || request.low_mark > entry->buf_count)
1405                 return -EINVAL;
1406         if (request.high_mark < 0 || request.high_mark > entry->buf_count)
1407                 return -EINVAL;
1408
1409         entry->freelist.low_mark = request.low_mark;
1410         entry->freelist.high_mark = request.high_mark;
1411
1412         return 0;
1413 }
1414
1415 /**
1416  * Unreserve the buffers in list, previously reserved using drmDMA.
1417  *
1418  * \param inode device inode.
1419  * \param filp file pointer.
1420  * \param cmd command.
1421  * \param arg pointer to a drm_buf_free structure.
1422  * \return zero on success or a negative number on failure.
1423  *
1424  * Calls drm_free_buffer() for each buffer in the list.
1425  * This function is primarily used for debugging.
1426  */
1427 int drm_freebufs(struct inode *inode, struct file *filp,
1428                  unsigned int cmd, unsigned long arg)
1429 {
1430         drm_file_t *priv = filp->private_data;
1431         drm_device_t *dev = priv->head->dev;
1432         drm_device_dma_t *dma = dev->dma;
1433         drm_buf_free_t request;
1434         int i;
1435         int idx;
1436         drm_buf_t *buf;
1437
1438         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1439                 return -EINVAL;
1440
1441         if (!dma)
1442                 return -EINVAL;
1443
1444         if (copy_from_user(&request,
1445                            (drm_buf_free_t __user *) arg, sizeof(request)))
1446                 return -EFAULT;
1447
1448         DRM_DEBUG("%d\n", request.count);
1449         for (i = 0; i < request.count; i++) {
1450                 if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
1451                         return -EFAULT;
1452                 if (idx < 0 || idx >= dma->buf_count) {
1453                         DRM_ERROR("Index %d (of %d max)\n",
1454                                   idx, dma->buf_count - 1);
1455                         return -EINVAL;
1456                 }
1457                 buf = dma->buflist[idx];
1458                 if (buf->filp != filp) {
1459                         DRM_ERROR("Process %d freeing buffer not owned\n",
1460                                   current->pid);
1461                         return -EINVAL;
1462                 }
1463                 drm_free_buffer(dev, buf);
1464         }
1465
1466         return 0;
1467 }
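
/*
 * Usage sketch (userspace, illustrative only): releasing previously
 * reserved buffers through DRM_IOCTL_FREE_BUFS, which reaches
 * drm_freebufs() above.  The kernel walks the index list, rejects
 * indices that are out of range or buffers owned by another file
 * handle, and calls drm_free_buffer() on the rest.  The helper name
 * free_bufs and the "indices"/"count" arguments (obtained from an
 * earlier DMA reservation) are assumptions for the example.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/drm.h>
 *
 *	int free_bufs(int fd, int *indices, int count)
 *	{
 *		struct drm_buf_free req = {
 *			.count = count,		// number of entries in the list
 *			.list  = indices,	// buffer indices to release
 *		};
 *
 *		return ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 *	}
 */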
1468
1469 /**
1470  * Maps all of the DMA buffers into client-virtual space (ioctl).
1471  *
1472  * \param inode device inode.
1473  * \param filp file pointer.
1474  * \param cmd command.
1475  * \param arg pointer to a drm_buf_map structure.
1476  * \return zero on success or a negative number on failure.
1477  *
1478  * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
1479  * about each buffer into user space. For PCI buffers, it calls do_mmap() with
1480  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1481  * drm_mmap_dma().
1482  */
1483 int drm_mapbufs(struct inode *inode, struct file *filp,
1484                 unsigned int cmd, unsigned long arg)
1485 {
1486         drm_file_t *priv = filp->private_data;
1487         drm_device_t *dev = priv->head->dev;
1488         drm_device_dma_t *dma = dev->dma;
1489         drm_buf_map_t __user *argp = (void __user *)arg;
1490         int retcode = 0;
1491         const int zero = 0;
1492         unsigned long virtual;
1493         unsigned long address;
1494         drm_buf_map_t request;
1495         int i;
1496
1497         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1498                 return -EINVAL;
1499
1500         if (!dma)
1501                 return -EINVAL;
1502
1503         spin_lock(&dev->count_lock);
1504         if (atomic_read(&dev->buf_alloc)) {
1505                 spin_unlock(&dev->count_lock);
1506                 return -EBUSY;
1507         }
1508         dev->buf_use++;         /* Can't allocate more after this call */
1509         spin_unlock(&dev->count_lock);
1510
1511         if (copy_from_user(&request, argp, sizeof(request)))
1512                 return -EFAULT;
1513
1514         if (request.count >= dma->buf_count) {
1515                 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1516                     || (drm_core_check_feature(dev, DRIVER_SG)
1517                         && (dma->flags & _DRM_DMA_USE_SG))
1518                     || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1519                         && (dma->flags & _DRM_DMA_USE_FB))) {
1520                         drm_map_t *map = dev->agp_buffer_map;
1521                         unsigned long token = dev->agp_buffer_token;
1522
1523                         if (!map) {
1524                                 retcode = -EINVAL;
1525                                 goto done;
1526                         }
1527
1528                         down_write(&current->mm->mmap_sem);
1529                         virtual = do_mmap(filp, 0, map->size,
1530                                           PROT_READ | PROT_WRITE,
1531                                           MAP_SHARED, token);
1532                         up_write(&current->mm->mmap_sem);
1533                 } else {
1534                         down_write(&current->mm->mmap_sem);
1535                         virtual = do_mmap(filp, 0, dma->byte_count,
1536                                           PROT_READ | PROT_WRITE,
1537                                           MAP_SHARED, 0);
1538                         up_write(&current->mm->mmap_sem);
1539                 }
1540                 if (virtual > -1024UL) {
1541                         /* Real error */
1542                         retcode = (signed long)virtual;
1543                         goto done;
1544                 }
1545                 request.virtual = (void __user *)virtual;
1546
1547                 for (i = 0; i < dma->buf_count; i++) {
1548                         if (copy_to_user(&request.list[i].idx,
1549                                          &dma->buflist[i]->idx,
1550                                          sizeof(request.list[0].idx))) {
1551                                 retcode = -EFAULT;
1552                                 goto done;
1553                         }
1554                         if (copy_to_user(&request.list[i].total,
1555                                          &dma->buflist[i]->total,
1556                                          sizeof(request.list[0].total))) {
1557                                 retcode = -EFAULT;
1558                                 goto done;
1559                         }
1560                         if (copy_to_user(&request.list[i].used,
1561                                          &zero, sizeof(zero))) {
1562                                 retcode = -EFAULT;
1563                                 goto done;
1564                         }
1565                         address = virtual + dma->buflist[i]->offset;    /* client-virtual address of this buffer */
1566                         if (copy_to_user(&request.list[i].address,
1567                                          &address, sizeof(address))) {
1568                                 retcode = -EFAULT;
1569                                 goto done;
1570                         }
1571                 }
1572         }
1573       done:
1574         request.count = dma->buf_count;
1575         DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
1576
1577         if (copy_to_user(argp, &request, sizeof(request)))
1578                 return -EFAULT;
1579
1580         return retcode;
1581 }
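
/*
 * Usage sketch (userspace, illustrative only): mapping the whole buffer
 * region through DRM_IOCTL_MAP_BUFS, which ends up in drm_mapbufs()
 * above.  request.count must be at least the number of buffers or the
 * kernel skips the mapping and only reports the required count, so the
 * same two-pass pattern as for DRM_IOCTL_INFO_BUFS applies.  On success
 * each list entry carries the buffer's index, size and client-virtual
 * address.  The helper name map_bufs is made up for the example.
 *
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/drm.h>
 *
 *	int map_bufs(int fd, struct drm_buf_map *map)
 *	{
 *		// First pass: count == 0 just reports the buffer count.
 *		map->count = 0;
 *		map->list = NULL;
 *		if (ioctl(fd, DRM_IOCTL_MAP_BUFS, map) < 0)
 *			return -1;
 *		if (map->count == 0)
 *			return 0;
 *
 *		// Second pass: supply enough list entries to trigger the
 *		// actual mapping and the per-buffer copy-out.
 *		map->list = calloc(map->count, sizeof(*map->list));
 *		if (!map->list)
 *			return -1;
 *		if (ioctl(fd, DRM_IOCTL_MAP_BUFS, map) < 0) {
 *			free(map->list);
 *			return -1;
 *		}
 *		// map->list[i].address now holds each buffer's address in
 *		// this process; map->virtual points at the mapped region.
 *		return 0;
 *	}
 */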
1582
1583 /**
1584  * Compute size order.  Returns the exponent of the smallest power of two which
1585  * is greater than or equal to the given number.
1586  *
1587  * \param size size.
1588  * \return order.
1589  *
1590  * \todo Can be made faster.
1591  */
1592 int drm_order(unsigned long size)
1593 {
1594         int order;
1595         unsigned long tmp;
1596
1597         for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1598
1599         if (size & (size - 1))
1600                 ++order;
1601
1602         return order;
1603 }
1604 EXPORT_SYMBOL(drm_order);
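
/*
 * Worked examples for drm_order() (illustrative): the returned order is
 * the exponent of the smallest power of two covering the requested size.
 *
 *	drm_order(1)    == 0	// 2^0  = 1
 *	drm_order(4096) == 12	// 2^12 = 4096 (exact power of two)
 *	drm_order(4097) == 13	// 2^13 = 8192 is the next power of two
 *
 * Typical driver-side use, as in the drm_addbufs_*() helpers earlier in
 * this file, is to turn a requested byte size into a pool index:
 *
 *	order = drm_order(request->size);
 *	size = 1 << order;
 */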
1605
1606