dma-debug: introduce debug_dma_assert_idle()
lib/dma-debug.c
1 /*
2  * Copyright (C) 2008 Advanced Micro Devices, Inc.
3  *
4  * Author: Joerg Roedel <joerg.roedel@amd.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18  */
19
20 #include <linux/scatterlist.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/stacktrace.h>
23 #include <linux/dma-debug.h>
24 #include <linux/spinlock.h>
25 #include <linux/debugfs.h>
26 #include <linux/uaccess.h>
27 #include <linux/export.h>
28 #include <linux/device.h>
29 #include <linux/types.h>
30 #include <linux/sched.h>
31 #include <linux/ctype.h>
32 #include <linux/list.h>
33 #include <linux/slab.h>
34
35 #include <asm/sections.h>
36
37 #define HASH_SIZE       1024ULL
38 #define HASH_FN_SHIFT   13
39 #define HASH_FN_MASK    (HASH_SIZE - 1)
40
41 enum {
42         dma_debug_single,
43         dma_debug_page,
44         dma_debug_sg,
45         dma_debug_coherent,
46 };
47
48 enum map_err_types {
49         MAP_ERR_CHECK_NOT_APPLICABLE,
50         MAP_ERR_NOT_CHECKED,
51         MAP_ERR_CHECKED,
52 };
53
54 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
55
56 /**
57  * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
58  * @list: node on pre-allocated free_entries list
59  * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
60  * @type: single, page, sg, coherent
61  * @pfn: page frame of the start address
62  * @offset: offset of mapping relative to pfn
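 * @dev_addr: dma address of the mapping, as returned by the map function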
63  * @size: length of the mapping
64  * @direction: enum dma_data_direction
65  * @sg_call_ents: 'nents' from dma_map_sg
66  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
67  * @map_err_type: track whether dma_mapping_error() was checked
68  * @stacktrace: support backtraces when a violation is detected
69  */
70 struct dma_debug_entry {
71         struct list_head list;
72         struct device    *dev;
73         int              type;
74         unsigned long    pfn;
75         size_t           offset;
76         u64              dev_addr;
77         u64              size;
78         int              direction;
79         int              sg_call_ents;
80         int              sg_mapped_ents;
81         enum map_err_types  map_err_type;
82 #ifdef CONFIG_STACKTRACE
83         struct           stack_trace stacktrace;
84         unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
85 #endif
86 };
87
88 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
89
90 struct hash_bucket {
91         struct list_head list;
92         spinlock_t lock;
93 } ____cacheline_aligned_in_smp;
94
95 /* Hash list to save the allocated dma addresses */
96 static struct hash_bucket dma_entry_hash[HASH_SIZE];
97 /* List of pre-allocated dma_debug_entry's */
98 static LIST_HEAD(free_entries);
99 /* Lock for the list above */
100 static DEFINE_SPINLOCK(free_entries_lock);
101
102 /* Global disable flag - will be set in case of an error */
103 static u32 global_disable __read_mostly;
104
105 /* Global error count */
106 static u32 error_count;
107
108 /* Global error show enable */
109 static u32 show_all_errors __read_mostly;
110 /* Number of errors to show */
111 static u32 show_num_errors = 1;
112
113 static u32 num_free_entries;
114 static u32 min_free_entries;
115 static u32 nr_total_entries;
116
117 /* number of preallocated entries requested by kernel cmdline */
118 static u32 req_entries;
119
120 /* debugfs dentries for the stuff above */
121 static struct dentry *dma_debug_dent        __read_mostly;
122 static struct dentry *global_disable_dent   __read_mostly;
123 static struct dentry *error_count_dent      __read_mostly;
124 static struct dentry *show_all_errors_dent  __read_mostly;
125 static struct dentry *show_num_errors_dent  __read_mostly;
126 static struct dentry *num_free_entries_dent __read_mostly;
127 static struct dentry *min_free_entries_dent __read_mostly;
128 static struct dentry *filter_dent           __read_mostly;
129
130 /* per-driver filter related state */
131
132 #define NAME_MAX_LEN    64
133
134 static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
135 static struct device_driver *current_driver                    __read_mostly;
136
137 static DEFINE_RWLOCK(driver_name_lock);
138
139 static const char *const maperr2str[] = {
140         [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
141         [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
142         [MAP_ERR_CHECKED] = "dma map error checked",
143 };
144
145 static const char *type2name[4] = { "single", "page",
146                                     "scatter-gather", "coherent" };
147
148 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
149                                    "DMA_FROM_DEVICE", "DMA_NONE" };
150
151 /*
152  * The access to some variables in this macro is racy. We can't use atomic_t
153  * here because all these variables are exported to debugfs. Some of them are even
154  * writeable. This is also the reason why a lock won't help much. But anyway,
155  * the races are no big deal. Here is why:
156  *
157  *   error_count: the addition is racy, but the worst thing that can happen is
158  *                that we don't count some errors
159  *   show_num_errors: the subtraction is racy. Also no big deal because in
160  *                    worst case this will result in one more warning in the
161  *                    system log than the user configured. This variable is
162  *                    writeable via debugfs.
163  */
164 static inline void dump_entry_trace(struct dma_debug_entry *entry)
165 {
166 #ifdef CONFIG_STACKTRACE
167         if (entry) {
168                 pr_warning("Mapped at:\n");
169                 print_stack_trace(&entry->stacktrace, 0);
170         }
171 #endif
172 }
173
174 static bool driver_filter(struct device *dev)
175 {
176         struct device_driver *drv;
177         unsigned long flags;
178         bool ret;
179
180         /* driver filter off */
181         if (likely(!current_driver_name[0]))
182                 return true;
183
184         /* driver filter on and initialized */
185         if (current_driver && dev && dev->driver == current_driver)
186                 return true;
187
188         /* driver filter on, but we can't filter on a NULL device... */
189         if (!dev)
190                 return false;
191
192         if (current_driver || !current_driver_name[0])
193                 return false;
194
195         /* driver filter on but not yet initialized */
196         drv = dev->driver;
197         if (!drv)
198                 return false;
199
200         /* lock to protect against change of current_driver_name */
201         read_lock_irqsave(&driver_name_lock, flags);
202
203         ret = false;
204         if (drv->name &&
205             strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
206                 current_driver = drv;
207                 ret = true;
208         }
209
210         read_unlock_irqrestore(&driver_name_lock, flags);
211
212         return ret;
213 }
214
215 #define err_printk(dev, entry, format, arg...) do {                     \
216                 error_count += 1;                                       \
217                 if (driver_filter(dev) &&                               \
218                     (show_all_errors || show_num_errors > 0)) {         \
219                         WARN(1, "%s %s: " format,                       \
220                              dev ? dev_driver_string(dev) : "NULL",     \
221                              dev ? dev_name(dev) : "NULL", ## arg);     \
222                         dump_entry_trace(entry);                        \
223                 }                                                       \
224                 if (!show_all_errors && show_num_errors > 0)            \
225                         show_num_errors -= 1;                           \
226         } while (0)
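/*
 * Note that error_count above is bumped for every detected violation,
 * while the WARN() splat is emitted only when the per-driver filter
 * matches and either all errors are requested or the num_errors budget
 * is not yet exhausted.  Both knobs are exposed in debugfs, e.g.
 * (illustrative, assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/dma-api/all_errors
 */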
227
228 /*
229  * Hash related functions
230  *
231  * Every DMA-API request is saved into a struct dma_debug_entry. To
232  * have quick access to these structs they are stored into a hash.
233  */
234 static int hash_fn(struct dma_debug_entry *entry)
235 {
236         /*
237          * Hash function is based on the dma address.
238          * We use bits 13-22 here as the index into the hash
239          */
240         return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
241 }
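/*
 * Worked example (illustrative): with HASH_FN_SHIFT == 13 and
 * HASH_FN_MASK == 0x3ff, a dev_addr of 0x12345678 hashes to
 * (0x12345678 >> 13) & 0x3ff == 0x91a2 & 0x3ff == 0x1a2, i.e. bucket 418.
 * Each bucket thus covers an 8 KiB (1 << HASH_FN_SHIFT) window of dma
 * addresses, which is the step size bucket_find_contain() walks below.
 */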
242
243 /*
244  * Request exclusive access to a hash bucket for a given dma_debug_entry.
245  */
246 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
247                                            unsigned long *flags)
248 {
249         int idx = hash_fn(entry);
250         unsigned long __flags;
251
252         spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
253         *flags = __flags;
254         return &dma_entry_hash[idx];
255 }
256
257 /*
258  * Give up exclusive access to the hash bucket
259  */
260 static void put_hash_bucket(struct hash_bucket *bucket,
261                             unsigned long *flags)
262 {
263         unsigned long __flags = *flags;
264
265         spin_unlock_irqrestore(&bucket->lock, __flags);
266 }
267
268 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
269 {
270         return ((a->dev_addr == b->dev_addr) &&
271                 (a->dev == b->dev)) ? true : false;
272 }
273
274 static bool containing_match(struct dma_debug_entry *a,
275                              struct dma_debug_entry *b)
276 {
277         if (a->dev != b->dev)
278                 return false;
279
280         if ((b->dev_addr <= a->dev_addr) &&
281             ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
282                 return true;
283
284         return false;
285 }
286
287 /*
288  * Search for a given entry in the hash bucket list
289  */
290 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
291                                                   struct dma_debug_entry *ref,
292                                                   match_fn match)
293 {
294         struct dma_debug_entry *entry, *ret = NULL;
295         int matches = 0, match_lvl, last_lvl = -1;
296
297         list_for_each_entry(entry, &bucket->list, list) {
298                 if (!match(ref, entry))
299                         continue;
300
301                 /*
302                  * Some drivers map the same physical address multiple
303                  * times. Without a hardware IOMMU this results in the
304                  * same device addresses being put into the dma-debug
305                  * hash multiple times too. This can result in false
306                  * positives being reported. Therefore we implement a
307                  * best-fit algorithm here which returns the entry from
308                  * the hash which best fits the reference value
309                  * instead of the first-fit.
310                  */
311                 matches += 1;
312                 match_lvl = 0;
313                 entry->size         == ref->size         ? ++match_lvl : 0;
314                 entry->type         == ref->type         ? ++match_lvl : 0;
315                 entry->direction    == ref->direction    ? ++match_lvl : 0;
316                 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
317
318                 if (match_lvl == 4) {
319                         /* perfect-fit - return the result */
320                         return entry;
321                 } else if (match_lvl > last_lvl) {
322                         /*
323                  * We found an entry that fits better than the
324                          * previous one or it is the 1st match.
325                          */
326                         last_lvl = match_lvl;
327                         ret      = entry;
328                 }
329         }
330
331         /*
332          * If we have multiple matches but no perfect-fit, just return
333          * NULL.
334          */
335         ret = (matches == 1) ? ret : NULL;
336
337         return ret;
338 }
339
340 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
341                                                  struct dma_debug_entry *ref)
342 {
343         return __hash_bucket_find(bucket, ref, exact_match);
344 }
345
346 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
347                                                    struct dma_debug_entry *ref,
348                                                    unsigned long *flags)
349 {
350
351         unsigned int max_range = dma_get_max_seg_size(ref->dev);
352         struct dma_debug_entry *entry, index = *ref;
353         unsigned int range = 0;
354
355         while (range <= max_range) {
356                 entry = __hash_bucket_find(*bucket, &index, containing_match);
357
358                 if (entry)
359                         return entry;
360
361                 /*
362                  * Nothing found, go back a hash bucket
363                  */
364                 put_hash_bucket(*bucket, flags);
365                 range          += (1 << HASH_FN_SHIFT);
366                 index.dev_addr -= (1 << HASH_FN_SHIFT);
367                 *bucket = get_hash_bucket(&index, flags);
368         }
369
370         return NULL;
371 }
372
373 /*
374  * Add an entry to a hash bucket
375  */
376 static void hash_bucket_add(struct hash_bucket *bucket,
377                             struct dma_debug_entry *entry)
378 {
379         list_add_tail(&entry->list, &bucket->list);
380 }
381
382 /*
383  * Remove entry from a hash bucket list
384  */
385 static void hash_bucket_del(struct dma_debug_entry *entry)
386 {
387         list_del(&entry->list);
388 }
389
390 static unsigned long long phys_addr(struct dma_debug_entry *entry)
391 {
392         return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
393 }
394
395 /*
396  * Dump mapping entries for debugging purposes
397  */
398 void debug_dma_dump_mappings(struct device *dev)
399 {
400         int idx;
401
402         for (idx = 0; idx < HASH_SIZE; idx++) {
403                 struct hash_bucket *bucket = &dma_entry_hash[idx];
404                 struct dma_debug_entry *entry;
405                 unsigned long flags;
406
407                 spin_lock_irqsave(&bucket->lock, flags);
408
409                 list_for_each_entry(entry, &bucket->list, list) {
410                         if (!dev || dev == entry->dev) {
411                                 dev_info(entry->dev,
412                                          "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
413                                          type2name[entry->type], idx,
414                                          phys_addr(entry), entry->pfn,
415                                          entry->dev_addr, entry->size,
416                                          dir2name[entry->direction],
417                                          maperr2str[entry->map_err_type]);
418                         }
419                 }
420
421                 spin_unlock_irqrestore(&bucket->lock, flags);
422         }
423 }
424 EXPORT_SYMBOL(debug_dma_dump_mappings);
425
426 /*
427  * For each page mapped (initial page in the case of
428  * dma_alloc_coherent/dma_map_{single|page}, or each page in a
429  * scatterlist) insert into this tree using the pfn as the key. At
430  * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
431  * the pfn already exists at insertion time add a tag as a reference
432  * count for the overlapping mappings.  For now, the overlap tracking
433  * just ensures that 'unmaps' balance 'maps' before marking the pfn
434  * idle, but we should also be flagging overlaps as an API violation.
435  *
436  * Memory usage is mostly constrained by the maximum number of available
437  * dma-debug entries in that we need a free dma_debug_entry before
438  * inserting into the tree.  In the case of dma_map_{single|page} and
439  * dma_alloc_coherent there is only one dma_debug_entry and one pfn to
440  * track per event.  dma_map_sg(), on the other hand,
441  * consumes a single dma_debug_entry, but inserts 'nents' entries into
442  * the tree.
443  *
444  * At any time debug_dma_assert_idle() can be called to trigger a
445  * warning if the given page is in the active set.
446  */
447 static RADIX_TREE(dma_active_pfn, GFP_NOWAIT);
448 static DEFINE_SPINLOCK(radix_lock);
449 #define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
450
451 static int active_pfn_read_overlap(unsigned long pfn)
452 {
453         int overlap = 0, i;
454
455         for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
456                 if (radix_tree_tag_get(&dma_active_pfn, pfn, i))
457                         overlap |= 1 << i;
458         return overlap;
459 }
460
461 static int active_pfn_set_overlap(unsigned long pfn, int overlap)
462 {
463         int i;
464
465         if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0)
466                 return 0;
467
468         for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
469                 if (overlap & 1 << i)
470                         radix_tree_tag_set(&dma_active_pfn, pfn, i);
471                 else
472                         radix_tree_tag_clear(&dma_active_pfn, pfn, i);
473
474         return overlap;
475 }
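/*
 * Worked example (assuming RADIX_TREE_MAX_TAGS == 3, its usual value, so
 * ACTIVE_PFN_MAX_OVERLAP == 7): an overlap count of 5 (binary 101) is
 * stored by setting radix-tree tags 2 and 0 and clearing tag 1 on the
 * pfn's slot; active_pfn_read_overlap() above reassembles the same value
 * by OR-ing the tag bits back together.
 */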
476
477 static void active_pfn_inc_overlap(unsigned long pfn)
478 {
479         int overlap = active_pfn_read_overlap(pfn);
480
481         overlap = active_pfn_set_overlap(pfn, ++overlap);
482
483         /* If we overflowed the overlap counter then we're potentially
484          * leaking dma-mappings.  Otherwise, if maps and unmaps are
485          * balanced then this overflow may cause false negatives in
486          * debug_dma_assert_idle() as the pfn may be marked idle
487          * prematurely.
488          */
489         WARN_ONCE(overlap == 0,
490                   "DMA-API: exceeded %d overlapping mappings of pfn %lx\n",
491                   ACTIVE_PFN_MAX_OVERLAP, pfn);
492 }
493
494 static int active_pfn_dec_overlap(unsigned long pfn)
495 {
496         int overlap = active_pfn_read_overlap(pfn);
497
498         return active_pfn_set_overlap(pfn, --overlap);
499 }
500
501 static int active_pfn_insert(struct dma_debug_entry *entry)
502 {
503         unsigned long flags;
504         int rc;
505
506         spin_lock_irqsave(&radix_lock, flags);
507         rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry);
508         if (rc == -EEXIST)
509                 active_pfn_inc_overlap(entry->pfn);
510         spin_unlock_irqrestore(&radix_lock, flags);
511
512         return rc;
513 }
514
515 static void active_pfn_remove(struct dma_debug_entry *entry)
516 {
517         unsigned long flags;
518
519         spin_lock_irqsave(&radix_lock, flags);
520         if (active_pfn_dec_overlap(entry->pfn) == 0)
521                 radix_tree_delete(&dma_active_pfn, entry->pfn);
522         spin_unlock_irqrestore(&radix_lock, flags);
523 }
524
525 /**
526  * debug_dma_assert_idle() - assert that a page is not undergoing dma
527  * @page: page to lookup in the dma_active_pfn tree
528  *
529  * Place a call to this routine in cases where the cpu touching the page
530  * before the dma has completed (i.e. before the page is dma_unmapped) would
531  * lead to data corruption.
532  */
533 void debug_dma_assert_idle(struct page *page)
534 {
535         unsigned long flags;
536         struct dma_debug_entry *entry;
537
538         if (!page)
539                 return;
540
541         spin_lock_irqsave(&radix_lock, flags);
542         entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page));
543         spin_unlock_irqrestore(&radix_lock, flags);
544
545         if (!entry)
546                 return;
547
548         err_printk(entry->dev, entry,
549                    "DMA-API: cpu touching an active dma mapped page "
550                    "[pfn=0x%lx]\n", entry->pfn);
551 }
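/*
 * Illustrative caller (not part of this file): a copy-on-write path can
 * assert that the source page is not still the target of an in-flight DMA
 * before duplicating it, roughly:
 *
 *	debug_dma_assert_idle(src_page);
 *	copy_user_highpage(dst_page, src_page, address, vma);
 *
 * If the pfn is still present in dma_active_pfn, the err_printk() above
 * fires and, with CONFIG_STACKTRACE, also dumps the stack recorded when
 * the page was dma-mapped.
 */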
552
553 /*
554  * Wrapper function for adding an entry to the hash.
555  * This function takes care of locking itself.
556  */
557 static void add_dma_entry(struct dma_debug_entry *entry)
558 {
559         struct hash_bucket *bucket;
560         unsigned long flags;
561         int rc;
562
563         bucket = get_hash_bucket(entry, &flags);
564         hash_bucket_add(bucket, entry);
565         put_hash_bucket(bucket, &flags);
566
567         rc = active_pfn_insert(entry);
568         if (rc == -ENOMEM) {
569                 pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n");
570                 global_disable = true;
571         }
572
573         /* TODO: report -EEXIST errors here as overlapping mappings are
574          * not supported by the DMA API
575          */
576 }
577
578 static struct dma_debug_entry *__dma_entry_alloc(void)
579 {
580         struct dma_debug_entry *entry;
581
582         entry = list_entry(free_entries.next, struct dma_debug_entry, list);
583         list_del(&entry->list);
584         memset(entry, 0, sizeof(*entry));
585
586         num_free_entries -= 1;
587         if (num_free_entries < min_free_entries)
588                 min_free_entries = num_free_entries;
589
590         return entry;
591 }
592
593 /* struct dma_debug_entry allocator
594  *
595  * The next two functions implement the allocator for
596  * struct dma_debug_entries.
597  */
598 static struct dma_debug_entry *dma_entry_alloc(void)
599 {
600         struct dma_debug_entry *entry;
601         unsigned long flags;
602
603         spin_lock_irqsave(&free_entries_lock, flags);
604
605         if (list_empty(&free_entries)) {
606                 pr_err("DMA-API: debugging out of memory - disabling\n");
607                 global_disable = true;
608                 spin_unlock_irqrestore(&free_entries_lock, flags);
609                 return NULL;
610         }
611
612         entry = __dma_entry_alloc();
613
614         spin_unlock_irqrestore(&free_entries_lock, flags);
615
616 #ifdef CONFIG_STACKTRACE
617         entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
618         entry->stacktrace.entries = entry->st_entries;
619         entry->stacktrace.skip = 2;
620         save_stack_trace(&entry->stacktrace);
621 #endif
622
623         return entry;
624 }
625
626 static void dma_entry_free(struct dma_debug_entry *entry)
627 {
628         unsigned long flags;
629
630         active_pfn_remove(entry);
631
632         /*
633          * add to beginning of the list - this way the entries are
634          * more likely cache hot when they are reallocated.
635          */
636         spin_lock_irqsave(&free_entries_lock, flags);
637         list_add(&entry->list, &free_entries);
638         num_free_entries += 1;
639         spin_unlock_irqrestore(&free_entries_lock, flags);
640 }
641
642 int dma_debug_resize_entries(u32 num_entries)
643 {
644         int i, delta, ret = 0;
645         unsigned long flags;
646         struct dma_debug_entry *entry;
647         LIST_HEAD(tmp);
648
649         spin_lock_irqsave(&free_entries_lock, flags);
650
651         if (nr_total_entries < num_entries) {
652                 delta = num_entries - nr_total_entries;
653
654                 spin_unlock_irqrestore(&free_entries_lock, flags);
655
656                 for (i = 0; i < delta; i++) {
657                         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
658                         if (!entry)
659                                 break;
660
661                         list_add_tail(&entry->list, &tmp);
662                 }
663
664                 spin_lock_irqsave(&free_entries_lock, flags);
665
666                 list_splice(&tmp, &free_entries);
667                 nr_total_entries += i;
668                 num_free_entries += i;
669         } else {
670                 delta = nr_total_entries - num_entries;
671
672                 for (i = 0; i < delta && !list_empty(&free_entries); i++) {
673                         entry = __dma_entry_alloc();
674                         kfree(entry);
675                 }
676
677                 nr_total_entries -= i;
678         }
679
680         if (nr_total_entries != num_entries)
681                 ret = 1;
682
683         spin_unlock_irqrestore(&free_entries_lock, flags);
684
685         return ret;
686 }
687 EXPORT_SYMBOL(dma_debug_resize_entries);
688
689 /*
690  * DMA-API debugging init code
691  *
692  * The init code does two things:
693  *   1. Initialize core data structures
694  *   2. Preallocate a given number of dma_debug_entry structs
695  */
696
697 static int prealloc_memory(u32 num_entries)
698 {
699         struct dma_debug_entry *entry, *next_entry;
700         int i;
701
702         for (i = 0; i < num_entries; ++i) {
703                 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
704                 if (!entry)
705                         goto out_err;
706
707                 list_add_tail(&entry->list, &free_entries);
708         }
709
710         num_free_entries = num_entries;
711         min_free_entries = num_entries;
712
713         pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
714
715         return 0;
716
717 out_err:
718
719         list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
720                 list_del(&entry->list);
721                 kfree(entry);
722         }
723
724         return -ENOMEM;
725 }
726
727 static ssize_t filter_read(struct file *file, char __user *user_buf,
728                            size_t count, loff_t *ppos)
729 {
730         char buf[NAME_MAX_LEN + 1];
731         unsigned long flags;
732         int len;
733
734         if (!current_driver_name[0])
735                 return 0;
736
737         /*
738          * We can't copy to userspace directly because current_driver_name can
739          * only be read under the driver_name_lock with irqs disabled. So
740          * create a temporary copy first.
741          */
742         read_lock_irqsave(&driver_name_lock, flags);
743         len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
744         read_unlock_irqrestore(&driver_name_lock, flags);
745
746         return simple_read_from_buffer(user_buf, count, ppos, buf, len);
747 }
748
749 static ssize_t filter_write(struct file *file, const char __user *userbuf,
750                             size_t count, loff_t *ppos)
751 {
752         char buf[NAME_MAX_LEN];
753         unsigned long flags;
754         size_t len;
755         int i;
756
757         /*
758          * We can't copy from userspace directly. Access to
759          * current_driver_name is protected with a write_lock with irqs
760          * disabled. Since copy_from_user can fault and may sleep we
761          * need to copy into a temporary buffer first
762          */
763         len = min(count, (size_t)(NAME_MAX_LEN - 1));
764         if (copy_from_user(buf, userbuf, len))
765                 return -EFAULT;
766
767         buf[len] = 0;
768
769         write_lock_irqsave(&driver_name_lock, flags);
770
771         /*
772          * Now handle the string we got from userspace very carefully.
773          * The rules are:
774          *         - only use the first token we got
775          *         - token delimiter is everything looking like a space
776          *           character (' ', '\n', '\t' ...)
777          *
778          */
779         if (!isalnum(buf[0])) {
780                 /*
781                  * If the first character userspace gave us is not
782                  * alphanumerical then assume the filter should be
783                  * switched off.
784                  */
785                 if (current_driver_name[0])
786                         pr_info("DMA-API: switching off dma-debug driver filter\n");
787                 current_driver_name[0] = 0;
788                 current_driver = NULL;
789                 goto out_unlock;
790         }
791
792         /*
793          * Now parse out the first token and use it as the name for the
794          * driver to filter for.
795          */
796         for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
797                 current_driver_name[i] = buf[i];
798                 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
799                         break;
800         }
801         current_driver_name[i] = 0;
802         current_driver = NULL;
803
804         pr_info("DMA-API: enable driver filter for driver [%s]\n",
805                 current_driver_name);
806
807 out_unlock:
808         write_unlock_irqrestore(&driver_name_lock, flags);
809
810         return count;
811 }
812
813 static const struct file_operations filter_fops = {
814         .read  = filter_read,
815         .write = filter_write,
816         .llseek = default_llseek,
817 };
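/*
 * Usage sketch for the driver filter (illustrative; assumes debugfs is
 * mounted at /sys/kernel/debug and a driver named "e1000e"):
 *
 *	echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *	echo ""     > /sys/kernel/debug/dma-api/driver_filter
 *
 * The first command restricts warnings to the named driver; the second
 * (anything not starting with an alphanumeric character) switches the
 * filter off again, as handled in filter_write() above.
 */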
818
819 static int dma_debug_fs_init(void)
820 {
821         dma_debug_dent = debugfs_create_dir("dma-api", NULL);
822         if (!dma_debug_dent) {
823                 pr_err("DMA-API: can not create debugfs directory\n");
824                 return -ENOMEM;
825         }
826
827         global_disable_dent = debugfs_create_bool("disabled", 0444,
828                         dma_debug_dent,
829                         &global_disable);
830         if (!global_disable_dent)
831                 goto out_err;
832
833         error_count_dent = debugfs_create_u32("error_count", 0444,
834                         dma_debug_dent, &error_count);
835         if (!error_count_dent)
836                 goto out_err;
837
838         show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
839                         dma_debug_dent,
840                         &show_all_errors);
841         if (!show_all_errors_dent)
842                 goto out_err;
843
844         show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
845                         dma_debug_dent,
846                         &show_num_errors);
847         if (!show_num_errors_dent)
848                 goto out_err;
849
850         num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
851                         dma_debug_dent,
852                         &num_free_entries);
853         if (!num_free_entries_dent)
854                 goto out_err;
855
856         min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
857                         dma_debug_dent,
858                         &min_free_entries);
859         if (!min_free_entries_dent)
860                 goto out_err;
861
862         filter_dent = debugfs_create_file("driver_filter", 0644,
863                                           dma_debug_dent, NULL, &filter_fops);
864         if (!filter_dent)
865                 goto out_err;
866
867         return 0;
868
869 out_err:
870         debugfs_remove_recursive(dma_debug_dent);
871
872         return -ENOMEM;
873 }
874
875 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
876 {
877         struct dma_debug_entry *entry;
878         unsigned long flags;
879         int count = 0, i;
880
881         local_irq_save(flags);
882
883         for (i = 0; i < HASH_SIZE; ++i) {
884                 spin_lock(&dma_entry_hash[i].lock);
885                 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
886                         if (entry->dev == dev) {
887                                 count += 1;
888                                 *out_entry = entry;
889                         }
890                 }
891                 spin_unlock(&dma_entry_hash[i].lock);
892         }
893
894         local_irq_restore(flags);
895
896         return count;
897 }
898
899 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
900 {
901         struct device *dev = data;
902         struct dma_debug_entry *uninitialized_var(entry);
903         int count;
904
905         if (global_disable)
906                 return 0;
907
908         switch (action) {
909         case BUS_NOTIFY_UNBOUND_DRIVER:
910                 count = device_dma_allocations(dev, &entry);
911                 if (count == 0)
912                         break;
913                 err_printk(dev, entry, "DMA-API: device driver has pending "
914                                 "DMA allocations while released from device "
915                                 "[count=%d]\n"
916                                 "One of leaked entries details: "
917                                 "[device address=0x%016llx] [size=%llu bytes] "
918                                 "[mapped with %s] [mapped as %s]\n",
919                         count, entry->dev_addr, entry->size,
920                         dir2name[entry->direction], type2name[entry->type]);
921                 break;
922         default:
923                 break;
924         }
925
926         return 0;
927 }
928
929 void dma_debug_add_bus(struct bus_type *bus)
930 {
931         struct notifier_block *nb;
932
933         if (global_disable)
934                 return;
935
936         nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
937         if (nb == NULL) {
938                 pr_err("dma_debug_add_bus: out of memory\n");
939                 return;
940         }
941
942         nb->notifier_call = dma_debug_device_change;
943
944         bus_register_notifier(bus, nb);
945 }
946
947 /*
948  * Let the architectures decide how many entries should be preallocated.
949  */
950 void dma_debug_init(u32 num_entries)
951 {
952         int i;
953
954         if (global_disable)
955                 return;
956
957         for (i = 0; i < HASH_SIZE; ++i) {
958                 INIT_LIST_HEAD(&dma_entry_hash[i].list);
959                 spin_lock_init(&dma_entry_hash[i].lock);
960         }
961
962         if (dma_debug_fs_init() != 0) {
963                 pr_err("DMA-API: error creating debugfs entries - disabling\n");
964                 global_disable = true;
965
966                 return;
967         }
968
969         if (req_entries)
970                 num_entries = req_entries;
971
972         if (prealloc_memory(num_entries) != 0) {
973                 pr_err("DMA-API: debugging out of memory error - disabled\n");
974                 global_disable = true;
975
976                 return;
977         }
978
979         nr_total_entries = num_free_entries;
980
981         pr_info("DMA-API: debugging enabled by kernel config\n");
982 }
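/*
 * Illustrative arch-side call (simplified; on x86 the constant is 1 << 16):
 *
 *	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 *
 * The dma_debug_entries= command line parameter handled below can still
 * override the number an architecture asks for.
 */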
983
984 static __init int dma_debug_cmdline(char *str)
985 {
986         if (!str)
987                 return -EINVAL;
988
989         if (strncmp(str, "off", 3) == 0) {
990                 pr_info("DMA-API: debugging disabled on kernel command line\n");
991                 global_disable = true;
992         }
993
994         return 0;
995 }
996
997 static __init int dma_debug_entries_cmdline(char *str)
998 {
999         int res;
1000
1001         if (!str)
1002                 return -EINVAL;
1003
1004         res = get_option(&str, &req_entries);
1005
1006         if (!res)
1007                 req_entries = 0;
1008
1009         return 0;
1010 }
1011
1012 __setup("dma_debug=", dma_debug_cmdline);
1013 __setup("dma_debug_entries=", dma_debug_entries_cmdline);
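/*
 * Kernel command line examples (illustrative values):
 *
 *	dma_debug=off            disable dma-debug completely
 *	dma_debug_entries=65536  request 65536 preallocated entries
 */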
1014
1015 static void check_unmap(struct dma_debug_entry *ref)
1016 {
1017         struct dma_debug_entry *entry;
1018         struct hash_bucket *bucket;
1019         unsigned long flags;
1020
1021         bucket = get_hash_bucket(ref, &flags);
1022         entry = bucket_find_exact(bucket, ref);
1023
1024         if (!entry) {
1025                 /* must drop lock before calling dma_mapping_error */
1026                 put_hash_bucket(bucket, &flags);
1027
1028                 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
1029                         err_printk(ref->dev, NULL,
1030                                    "DMA-API: device driver tries to free an "
1031                                    "invalid DMA memory address\n");
1032                 } else {
1033                         err_printk(ref->dev, NULL,
1034                                    "DMA-API: device driver tries to free DMA "
1035                                    "memory it has not allocated [device "
1036                                    "address=0x%016llx] [size=%llu bytes]\n",
1037                                    ref->dev_addr, ref->size);
1038                 }
1039                 return;
1040         }
1041
1042         if (ref->size != entry->size) {
1043                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
1044                            "DMA memory with different size "
1045                            "[device address=0x%016llx] [map size=%llu bytes] "
1046                            "[unmap size=%llu bytes]\n",
1047                            ref->dev_addr, entry->size, ref->size);
1048         }
1049
1050         if (ref->type != entry->type) {
1051                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
1052                            "DMA memory with wrong function "
1053                            "[device address=0x%016llx] [size=%llu bytes] "
1054                            "[mapped as %s] [unmapped as %s]\n",
1055                            ref->dev_addr, ref->size,
1056                            type2name[entry->type], type2name[ref->type]);
1057         } else if ((entry->type == dma_debug_coherent) &&
1058                    (phys_addr(ref) != phys_addr(entry))) {
1059                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
1060                            "DMA memory with different CPU address "
1061                            "[device address=0x%016llx] [size=%llu bytes] "
1062                            "[cpu alloc address=0x%016llx] "
1063                            "[cpu free address=0x%016llx]",
1064                            ref->dev_addr, ref->size,
1065                            phys_addr(entry),
1066                            phys_addr(ref));
1067         }
1068
1069         if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1070             ref->sg_call_ents != entry->sg_call_ents) {
1071                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
1072                            "DMA sg list with different entry count "
1073                            "[map count=%d] [unmap count=%d]\n",
1074                            entry->sg_call_ents, ref->sg_call_ents);
1075         }
1076
1077         /*
1078          * This may not be a bug in reality - but most implementations of the
1079          * DMA API don't handle this properly, so check for it here
1080          */
1081         if (ref->direction != entry->direction) {
1082                 err_printk(ref->dev, entry, "DMA-API: device driver frees "
1083                            "DMA memory with different direction "
1084                            "[device address=0x%016llx] [size=%llu bytes] "
1085                            "[mapped with %s] [unmapped with %s]\n",
1086                            ref->dev_addr, ref->size,
1087                            dir2name[entry->direction],
1088                            dir2name[ref->direction]);
1089         }
1090
1091         if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1092                 err_printk(ref->dev, entry,
1093                            "DMA-API: device driver failed to check map error "
1094                            "[device address=0x%016llx] [size=%llu bytes] "
1095                            "[mapped as %s]",
1096                            ref->dev_addr, ref->size,
1097                            type2name[entry->type]);
1098         }
1099
1100         hash_bucket_del(entry);
1101         dma_entry_free(entry);
1102
1103         put_hash_bucket(bucket, &flags);
1104 }
1105
1106 static void check_for_stack(struct device *dev, void *addr)
1107 {
1108         if (object_is_on_stack(addr))
1109                 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
1110                                 "stack [addr=%p]\n", addr);
1111 }
1112
1113 static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
1114 {
1115         unsigned long a1 = (unsigned long)addr;
1116         unsigned long b1 = a1 + len;
1117         unsigned long a2 = (unsigned long)start;
1118         unsigned long b2 = (unsigned long)end;
1119
1120         return !(b1 <= a2 || a1 >= b2);
1121 }
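/*
 * e.g. overlap((void *)0x1000, 0x200, (void *)0x1100, (void *)0x1800) is
 * true: the half-open ranges [0x1000, 0x1200) and [0x1100, 0x1800)
 * intersect.  check_for_illegal_area() below uses this against the kernel
 * text and rodata sections.
 */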
1122
1123 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1124 {
1125         if (overlap(addr, len, _text, _etext) ||
1126             overlap(addr, len, __start_rodata, __end_rodata))
1127                 err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1128 }
1129
1130 static void check_sync(struct device *dev,
1131                        struct dma_debug_entry *ref,
1132                        bool to_cpu)
1133 {
1134         struct dma_debug_entry *entry;
1135         struct hash_bucket *bucket;
1136         unsigned long flags;
1137
1138         bucket = get_hash_bucket(ref, &flags);
1139
1140         entry = bucket_find_contain(&bucket, ref, &flags);
1141
1142         if (!entry) {
1143                 err_printk(dev, NULL, "DMA-API: device driver tries "
1144                                 "to sync DMA memory it has not allocated "
1145                                 "[device address=0x%016llx] [size=%llu bytes]\n",
1146                                 (unsigned long long)ref->dev_addr, ref->size);
1147                 goto out;
1148         }
1149
1150         if (ref->size > entry->size) {
1151                 err_printk(dev, entry, "DMA-API: device driver syncs"
1152                                 " DMA memory outside allocated range "
1153                                 "[device address=0x%016llx] "
1154                                 "[allocation size=%llu bytes] "
1155                                 "[sync offset+size=%llu]\n",
1156                                 entry->dev_addr, entry->size,
1157                                 ref->size);
1158         }
1159
1160         if (entry->direction == DMA_BIDIRECTIONAL)
1161                 goto out;
1162
1163         if (ref->direction != entry->direction) {
1164                 err_printk(dev, entry, "DMA-API: device driver syncs "
1165                                 "DMA memory with different direction "
1166                                 "[device address=0x%016llx] [size=%llu bytes] "
1167                                 "[mapped with %s] [synced with %s]\n",
1168                                 (unsigned long long)ref->dev_addr, entry->size,
1169                                 dir2name[entry->direction],
1170                                 dir2name[ref->direction]);
1171         }
1172
1173         if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1174                       !(ref->direction == DMA_TO_DEVICE))
1175                 err_printk(dev, entry, "DMA-API: device driver syncs "
1176                                 "device read-only DMA memory for cpu "
1177                                 "[device address=0x%016llx] [size=%llu bytes] "
1178                                 "[mapped with %s] [synced with %s]\n",
1179                                 (unsigned long long)ref->dev_addr, entry->size,
1180                                 dir2name[entry->direction],
1181                                 dir2name[ref->direction]);
1182
1183         if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1184                        !(ref->direction == DMA_FROM_DEVICE))
1185                 err_printk(dev, entry, "DMA-API: device driver syncs "
1186                                 "device write-only DMA memory to device "
1187                                 "[device address=0x%016llx] [size=%llu bytes] "
1188                                 "[mapped with %s] [synced with %s]\n",
1189                                 (unsigned long long)ref->dev_addr, entry->size,
1190                                 dir2name[entry->direction],
1191                                 dir2name[ref->direction]);
1192
1193 out:
1194         put_hash_bucket(bucket, &flags);
1195 }
1196
1197 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1198                         size_t size, int direction, dma_addr_t dma_addr,
1199                         bool map_single)
1200 {
1201         struct dma_debug_entry *entry;
1202
1203         if (unlikely(global_disable))
1204                 return;
1205
1206         if (dma_mapping_error(dev, dma_addr))
1207                 return;
1208
1209         entry = dma_entry_alloc();
1210         if (!entry)
1211                 return;
1212
1213         entry->dev       = dev;
1214         entry->type      = dma_debug_page;
1215         entry->pfn       = page_to_pfn(page);
1216         entry->offset    = offset;
1217         entry->dev_addr  = dma_addr;
1218         entry->size      = size;
1219         entry->direction = direction;
1220         entry->map_err_type = MAP_ERR_NOT_CHECKED;
1221
1222         if (map_single)
1223                 entry->type = dma_debug_single;
1224
1225         if (!PageHighMem(page)) {
1226                 void *addr = page_address(page) + offset;
1227
1228                 check_for_stack(dev, addr);
1229                 check_for_illegal_area(dev, addr, size);
1230         }
1231
1232         add_dma_entry(entry);
1233 }
1234 EXPORT_SYMBOL(debug_dma_map_page);
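/*
 * Illustrative call site (simplified from the generic dma_map_page()
 * wrapper): the hook runs after the real mapping so that the dma address
 * is known:
 *
 *	addr = ops->map_page(dev, page, offset, size, dir, NULL);
 *	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 */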
1235
1236 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1237 {
1238         struct dma_debug_entry ref;
1239         struct dma_debug_entry *entry;
1240         struct hash_bucket *bucket;
1241         unsigned long flags;
1242
1243         if (unlikely(global_disable))
1244                 return;
1245
1246         ref.dev = dev;
1247         ref.dev_addr = dma_addr;
1248         bucket = get_hash_bucket(&ref, &flags);
1249
1250         list_for_each_entry(entry, &bucket->list, list) {
1251                 if (!exact_match(&ref, entry))
1252                         continue;
1253
1254                 /*
1255                  * The same physical address can be mapped multiple
1256                  * times. Without a hardware IOMMU this results in the
1257                  * same device addresses being put into the dma-debug
1258                  * hash multiple times too. This can result in false
1259                  * positives being reported. Therefore we implement a
1260                  * best-fit algorithm here which updates the first entry
1261                  * from the hash which fits the reference value and is
1262                  * not currently listed as being checked.
1263                  */
1264                 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1265                         entry->map_err_type = MAP_ERR_CHECKED;
1266                         break;
1267                 }
1268         }
1269
1270         put_hash_bucket(bucket, &flags);
1271 }
1272 EXPORT_SYMBOL(debug_dma_mapping_error);
1273
1274 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1275                           size_t size, int direction, bool map_single)
1276 {
1277         struct dma_debug_entry ref = {
1278                 .type           = dma_debug_page,
1279                 .dev            = dev,
1280                 .dev_addr       = addr,
1281                 .size           = size,
1282                 .direction      = direction,
1283         };
1284
1285         if (unlikely(global_disable))
1286                 return;
1287
1288         if (map_single)
1289                 ref.type = dma_debug_single;
1290
1291         check_unmap(&ref);
1292 }
1293 EXPORT_SYMBOL(debug_dma_unmap_page);
1294
1295 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1296                       int nents, int mapped_ents, int direction)
1297 {
1298         struct dma_debug_entry *entry;
1299         struct scatterlist *s;
1300         int i;
1301
1302         if (unlikely(global_disable))
1303                 return;
1304
1305         for_each_sg(sg, s, mapped_ents, i) {
1306                 entry = dma_entry_alloc();
1307                 if (!entry)
1308                         return;
1309
1310                 entry->type           = dma_debug_sg;
1311                 entry->dev            = dev;
1312                 entry->pfn            = page_to_pfn(sg_page(s));
1313                 entry->offset         = s->offset;
1314                 entry->size           = sg_dma_len(s);
1315                 entry->dev_addr       = sg_dma_address(s);
1316                 entry->direction      = direction;
1317                 entry->sg_call_ents   = nents;
1318                 entry->sg_mapped_ents = mapped_ents;
1319
1320                 if (!PageHighMem(sg_page(s))) {
1321                         check_for_stack(dev, sg_virt(s));
1322                         check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
1323                 }
1324
1325                 add_dma_entry(entry);
1326         }
1327 }
1328 EXPORT_SYMBOL(debug_dma_map_sg);
1329
1330 static int get_nr_mapped_entries(struct device *dev,
1331                                  struct dma_debug_entry *ref)
1332 {
1333         struct dma_debug_entry *entry;
1334         struct hash_bucket *bucket;
1335         unsigned long flags;
1336         int mapped_ents;
1337
1338         bucket       = get_hash_bucket(ref, &flags);
1339         entry        = bucket_find_exact(bucket, ref);
1340         mapped_ents  = 0;
1341
1342         if (entry)
1343                 mapped_ents = entry->sg_mapped_ents;
1344         put_hash_bucket(bucket, &flags);
1345
1346         return mapped_ents;
1347 }
1348
1349 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1350                         int nelems, int dir)
1351 {
1352         struct scatterlist *s;
1353         int mapped_ents = 0, i;
1354
1355         if (unlikely(global_disable))
1356                 return;
1357
1358         for_each_sg(sglist, s, nelems, i) {
1359
1360                 struct dma_debug_entry ref = {
1361                         .type           = dma_debug_sg,
1362                         .dev            = dev,
1363                         .pfn            = page_to_pfn(sg_page(s)),
1364                         .offset         = s->offset,
1365                         .dev_addr       = sg_dma_address(s),
1366                         .size           = sg_dma_len(s),
1367                         .direction      = dir,
1368                         .sg_call_ents   = nelems,
1369                 };
1370
1371                 if (mapped_ents && i >= mapped_ents)
1372                         break;
1373
1374                 if (!i)
1375                         mapped_ents = get_nr_mapped_entries(dev, &ref);
1376
1377                 check_unmap(&ref);
1378         }
1379 }
1380 EXPORT_SYMBOL(debug_dma_unmap_sg);
1381
1382 void debug_dma_alloc_coherent(struct device *dev, size_t size,
1383                               dma_addr_t dma_addr, void *virt)
1384 {
1385         struct dma_debug_entry *entry;
1386
1387         if (unlikely(global_disable))
1388                 return;
1389
1390         if (unlikely(virt == NULL))
1391                 return;
1392
1393         entry = dma_entry_alloc();
1394         if (!entry)
1395                 return;
1396
1397         entry->type      = dma_debug_coherent;
1398         entry->dev       = dev;
1399         entry->pfn       = page_to_pfn(virt_to_page(virt));
1400         entry->offset    = (size_t) virt & ~PAGE_MASK;
1401         entry->size      = size;
1402         entry->dev_addr  = dma_addr;
1403         entry->direction = DMA_BIDIRECTIONAL;
1404
1405         add_dma_entry(entry);
1406 }
1407 EXPORT_SYMBOL(debug_dma_alloc_coherent);
1408
1409 void debug_dma_free_coherent(struct device *dev, size_t size,
1410                          void *virt, dma_addr_t addr)
1411 {
1412         struct dma_debug_entry ref = {
1413                 .type           = dma_debug_coherent,
1414                 .dev            = dev,
1415                 .pfn            = page_to_pfn(virt_to_page(virt)),
1416                 .offset         = (size_t) virt & ~PAGE_MASK,
1417                 .dev_addr       = addr,
1418                 .size           = size,
1419                 .direction      = DMA_BIDIRECTIONAL,
1420         };
1421
1422         if (unlikely(global_disable))
1423                 return;
1424
1425         check_unmap(&ref);
1426 }
1427 EXPORT_SYMBOL(debug_dma_free_coherent);
1428
1429 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1430                                    size_t size, int direction)
1431 {
1432         struct dma_debug_entry ref;
1433
1434         if (unlikely(global_disable))
1435                 return;
1436
1437         ref.type         = dma_debug_single;
1438         ref.dev          = dev;
1439         ref.dev_addr     = dma_handle;
1440         ref.size         = size;
1441         ref.direction    = direction;
1442         ref.sg_call_ents = 0;
1443
1444         check_sync(dev, &ref, true);
1445 }
1446 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1447
1448 void debug_dma_sync_single_for_device(struct device *dev,
1449                                       dma_addr_t dma_handle, size_t size,
1450                                       int direction)
1451 {
1452         struct dma_debug_entry ref;
1453
1454         if (unlikely(global_disable))
1455                 return;
1456
1457         ref.type         = dma_debug_single;
1458         ref.dev          = dev;
1459         ref.dev_addr     = dma_handle;
1460         ref.size         = size;
1461         ref.direction    = direction;
1462         ref.sg_call_ents = 0;
1463
1464         check_sync(dev, &ref, false);
1465 }
1466 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1467
1468 void debug_dma_sync_single_range_for_cpu(struct device *dev,
1469                                          dma_addr_t dma_handle,
1470                                          unsigned long offset, size_t size,
1471                                          int direction)
1472 {
1473         struct dma_debug_entry ref;
1474
1475         if (unlikely(global_disable))
1476                 return;
1477
1478         ref.type         = dma_debug_single;
1479         ref.dev          = dev;
1480         ref.dev_addr     = dma_handle;
1481         ref.size         = offset + size;
1482         ref.direction    = direction;
1483         ref.sg_call_ents = 0;
1484
1485         check_sync(dev, &ref, true);
1486 }
1487 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
1488
1489 void debug_dma_sync_single_range_for_device(struct device *dev,
1490                                             dma_addr_t dma_handle,
1491                                             unsigned long offset,
1492                                             size_t size, int direction)
1493 {
1494         struct dma_debug_entry ref;
1495
1496         if (unlikely(global_disable))
1497                 return;
1498
1499         ref.type         = dma_debug_single;
1500         ref.dev          = dev;
1501         ref.dev_addr     = dma_handle;
1502         ref.size         = offset + size;
1503         ref.direction    = direction;
1504         ref.sg_call_ents = 0;
1505
1506         check_sync(dev, &ref, false);
1507 }
1508 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
1509
1510 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1511                                int nelems, int direction)
1512 {
1513         struct scatterlist *s;
1514         int mapped_ents = 0, i;
1515
1516         if (unlikely(global_disable))
1517                 return;
1518
1519         for_each_sg(sg, s, nelems, i) {
1520
1521                 struct dma_debug_entry ref = {
1522                         .type           = dma_debug_sg,
1523                         .dev            = dev,
1524                         .pfn            = page_to_pfn(sg_page(s)),
1525                         .offset         = s->offset,
1526                         .dev_addr       = sg_dma_address(s),
1527                         .size           = sg_dma_len(s),
1528                         .direction      = direction,
1529                         .sg_call_ents   = nelems,
1530                 };
1531
1532                 if (!i)
1533                         mapped_ents = get_nr_mapped_entries(dev, &ref);
1534
1535                 if (i >= mapped_ents)
1536                         break;
1537
1538                 check_sync(dev, &ref, true);
1539         }
1540 }
1541 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
1542
1543 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1544                                   int nelems, int direction)
1545 {
1546         struct scatterlist *s;
1547         int mapped_ents = 0, i;
1548
1549         if (unlikely(global_disable))
1550                 return;
1551
1552         for_each_sg(sg, s, nelems, i) {
1553
1554                 struct dma_debug_entry ref = {
1555                         .type           = dma_debug_sg,
1556                         .dev            = dev,
1557                         .pfn            = page_to_pfn(sg_page(s)),
1558                         .offset         = s->offset,
1559                         .dev_addr       = sg_dma_address(s),
1560                         .size           = sg_dma_len(s),
1561                         .direction      = direction,
1562                         .sg_call_ents   = nelems,
1563                 };
1564                 if (!i)
1565                         mapped_ents = get_nr_mapped_entries(dev, &ref);
1566
1567                 if (i >= mapped_ents)
1568                         break;
1569
1570                 check_sync(dev, &ref, false);
1571         }
1572 }
1573 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
1574
1575 static int __init dma_debug_driver_setup(char *str)
1576 {
1577         int i;
1578
1579         for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1580                 current_driver_name[i] = *str;
1581                 if (*str == 0)
1582                         break;
1583         }
1584
1585         if (current_driver_name[0])
1586                 pr_info("DMA-API: enable driver filter for driver [%s]\n",
1587                         current_driver_name);
1588
1589
1590         return 1;
1591 }
1592 __setup("dma_debug_driver=", dma_debug_driver_setup);