/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE	1024ULL
#define HASH_FN_SHIFT	13
#define HASH_FN_MASK	(HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))

enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	int			type;
	unsigned long		pfn;
	size_t			offset;
	u64			dev_addr;
	u64			size;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int		stack_len;
	unsigned long		stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* Number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[5] = { "single", "page",
				    "scatter-gather", "coherent",
				    "resource" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warning("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}

#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0);

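/*
 * Usage sketch (hypothetical call, not part of this file): a checker that
 * detects a violation reports it roughly like
 *
 *	err_printk(dev, entry,
 *		   "device driver frees DMA memory it has not allocated "
 *		   "[device address=0x%016llx]\n", ref->dev_addr);
 *
 * which bumps error_count, honours the per-driver filter and the
 * show_all_errors/show_num_errors throttles, and dumps the backtrace
 * recorded when the mapping was created.
 */
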
Joerg Roedel30dfa902009-01-09 12:34:49 +0100236/*
237 * Hash related functions
238 *
239 * Every DMA-API request is saved into a struct dma_debug_entry. To
240 * have quick access to these structs they are stored into a hash.
241 */
242static int hash_fn(struct dma_debug_entry *entry)
243{
244 /*
245 * Hash function is based on the dma address.
246 * We use bits 20-27 here as the index into the hash
247 */
248 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
249}
250
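/*
 * Worked example (illustrative): with HASH_FN_SHIFT == 13 and
 * HASH_SIZE == 1024, a dev_addr of 0x12345678 hashes to
 *
 *	(0x12345678 >> 13) & 0x3ff == 0x91a2 & 0x3ff == 0x1a2 (bucket 418),
 *
 * so all addresses sharing bits 13-22 land in the same bucket, and
 * bucket_find_contain() below can step between buckets in strides of
 * 1 << HASH_FN_SHIFT (8 KiB) of device address.
 */
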
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
	__releases(&bucket->lock)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

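/*
 * Illustrative scenario: a driver maps 64 KiB at dev_addr 0x10000 and later
 * syncs a sub-range starting at 0x1c000. The sync reference hashes to
 * bucket 14 while the mapping itself sits in bucket 8, so
 * bucket_find_contain() walks backwards one bucket (one 8 KiB address
 * stride with HASH_FN_SHIFT == 13) per iteration until containing_match()
 * finds the enclosing entry, giving up once the walk exceeds the device's
 * maximum segment size.
 */
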
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

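/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12 and
 * L1_CACHE_SHIFT == 6, i.e. 64 cachelines per 4 KiB page): an entry with
 * pfn == 0x1234 and offset == 0x80 maps to cacheline number
 *
 *	(0x1234 << 6) + (0x80 >> 6) == 0x48d00 + 2 == 0x48d02,
 *
 * so consecutive cachelines of a page occupy consecutive keys in the
 * dma_active_cacheline radix tree.
 */
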
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings. Otherwise, if maps and unmaps are
	 * balanced then this overflow may cause false negatives in
	 * debug_dma_assert_idle() as the cacheline may be marked idle
	 * prematurely.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data.  This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}

/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
	struct dma_debug_entry *entry = NULL;
	void **results = (void **) &ents;
	unsigned int nents, i;
	unsigned long flags;
	phys_addr_t cln;

	if (dma_debug_disabled())
		return;

	if (!page)
		return;

	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
	spin_lock_irqsave(&radix_lock, flags);
	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
				       CACHELINES_PER_PAGE);
	for (i = 0; i < nents; i++) {
		phys_addr_t ent_cln = to_cacheline_number(ents[i]);

		if (ent_cln == cln) {
			entry = ents[i];
			break;
		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
			break;
	}
	spin_unlock_irqrestore(&radix_lock, flags);

	if (!entry)
		return;

	cln = to_cacheline_number(entry);
	err_printk(entry->dev, entry,
		   "cpu touching an active dma mapped cacheline [cln=%pa]\n",
		   &cln);
}

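/*
 * Usage sketch (illustrative): the copy-on-write path is the classic
 * caller -- before the cpu copies and reuses a page that a device may
 * still be writing into, it asserts the page has no active cachelines:
 *
 *	static void cow_user_page(struct page *dst, struct page *src, ...)
 *	{
 *		debug_dma_assert_idle(src);
 *		...
 *	}
 *
 * If a cacheline of src is still in the radix tree, the err_printk()
 * above fires and dumps the stack trace of the original mapping.
 */
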
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}

static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

static void __dma_entry_alloc_check_leak(void)
{
	u32 tmp = nr_total_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_total_entries,
			(nr_total_entries / nr_prealloc_entries));
	}
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		__dma_entry_alloc_check_leak();
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

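/*
 * Sizing sketch (illustrative, assuming a 4 KiB PAGE_SIZE and a struct
 * dma_debug_entry of roughly 120 bytes with CONFIG_STACKTRACE): each
 * dma_debug_create_entries() call grows the pool by one page, i.e.
 * PAGE_SIZE / sizeof(struct dma_debug_entry) ~= 34 entries, so the
 * default preallocation of 1 << 16 entries costs on the order of a
 * couple thousand pages (several megabytes) at boot.
 */
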
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

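/*
 * Example (illustrative, assuming debugfs is mounted at /sys/kernel/debug):
 * the fops above back the driver_filter file, so
 *
 *	echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *
 * limits error reports to devices bound to the e1000e driver, while
 * writing a string that does not start with an alphanumeric character
 * (e.g. an empty line) switches the filter off again.
 */
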
static int dump_show(struct seq_file *seq, void *v)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			seq_printf(seq,
				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
				   dev_name(entry->dev),
				   dev_driver_string(entry->dev),
				   type2name[entry->type], idx,
				   phys_addr(entry), entry->pfn,
				   entry->dev_addr, entry->size,
				   dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);

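/*
 * Example (illustrative, assuming debugfs is mounted at /sys/kernel/debug):
 * every currently active mapping can be listed with
 *
 *	cat /sys/kernel/debug/dma-api/dump
 *
 * which prints one line per entry: device name, driver, mapping type,
 * hash index, physical address, pfn, device address, length, direction
 * and dma_mapping_error() check state.
 */
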
static void dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
}

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *uninitialized_var(entry);
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	dma_debug_fs_init();

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

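/*
 * Example (illustrative): the two parameters above are parsed from the
 * kernel command line, e.g.
 *
 *	dma_debug=off              disables the checks entirely
 *	dma_debug_entries=131072   doubles the default preallocation
 *
 * An invalid or missing value for dma_debug_entries falls back to
 * PREALLOC_DMA_DEBUG_ENTRIES (1 << 16).
 */
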
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, &flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/DMA-API.txt.
	 */
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}

static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}

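/*
 * Worked example (illustrative): for a mapping at addr == 0x1000 with
 * len == 0x200, checked against a region [0x1100, 0x2000):
 *
 *	b1 = 0x1200, a2 = 0x1100, b2 = 0x2000
 *	b1 <= a2 is false and a1 >= b2 is false, so overlap() returns true.
 *
 * The half-open interval convention means a mapping that merely ends
 * where the region starts (b1 == a2) does not count as overlapping.
 */
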
Ingo Molnarf39d1b92009-07-10 21:38:02 +02001139static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
Joerg Roedel2e34bde2009-03-16 16:51:55 +01001140{
Laura Abbottea535e42016-01-14 15:16:50 -08001141 if (overlap(addr, len, _stext, _etext) ||
Ingo Molnarf39d1b92009-07-10 21:38:02 +02001142 overlap(addr, len, __start_rodata, __end_rodata))
Robin Murphyf737b092018-12-10 14:00:27 +00001143 err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
Joerg Roedel2e34bde2009-03-16 16:51:55 +01001144}
1145
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001146static void check_sync(struct device *dev,
1147 struct dma_debug_entry *ref,
1148 bool to_cpu)
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001149{
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001150 struct dma_debug_entry *entry;
1151 struct hash_bucket *bucket;
1152 unsigned long flags;
1153
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001154 bucket = get_hash_bucket(ref, &flags);
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001155
Neil Hormanc6a21d02011-08-08 15:13:54 -04001156 entry = bucket_find_contain(&bucket, ref, &flags);
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001157
1158 if (!entry) {
Robin Murphyf737b092018-12-10 14:00:27 +00001159 err_printk(dev, NULL, "device driver tries "
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001160 "to sync DMA memory it has not allocated "
1161 "[device address=0x%016llx] [size=%llu bytes]\n",
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001162 (unsigned long long)ref->dev_addr, ref->size);
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001163 goto out;
1164 }
1165
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001166 if (ref->size > entry->size) {
Robin Murphyf737b092018-12-10 14:00:27 +00001167 err_printk(dev, entry, "device driver syncs"
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001168 " DMA memory outside allocated range "
1169 "[device address=0x%016llx] "
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001170 "[allocation size=%llu bytes] "
1171 "[sync offset+size=%llu]\n",
1172 entry->dev_addr, entry->size,
1173 ref->size);
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001174 }
1175
Krzysztof Halasa42d53b42010-01-08 14:42:36 -08001176 if (entry->direction == DMA_BIDIRECTIONAL)
1177 goto out;
1178
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001179 if (ref->direction != entry->direction) {
Robin Murphyf737b092018-12-10 14:00:27 +00001180 err_printk(dev, entry, "device driver syncs "
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001181 "DMA memory with different direction "
1182 "[device address=0x%016llx] [size=%llu bytes] "
1183 "[mapped with %s] [synced with %s]\n",
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001184 (unsigned long long)ref->dev_addr, entry->size,
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001185 dir2name[entry->direction],
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001186 dir2name[ref->direction]);
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001187 }
1188
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001189 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001190 !(ref->direction == DMA_TO_DEVICE))
Robin Murphyf737b092018-12-10 14:00:27 +00001191 err_printk(dev, entry, "device driver syncs "
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001192 "device read-only DMA memory for cpu "
1193 "[device address=0x%016llx] [size=%llu bytes] "
1194 "[mapped with %s] [synced with %s]\n",
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001195 (unsigned long long)ref->dev_addr, entry->size,
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001196 dir2name[entry->direction],
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001197 dir2name[ref->direction]);
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001198
1199 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001200 !(ref->direction == DMA_FROM_DEVICE))
Robin Murphyf737b092018-12-10 14:00:27 +00001201 err_printk(dev, entry, "device driver syncs "
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001202 "device write-only DMA memory to device "
1203 "[device address=0x%016llx] [size=%llu bytes] "
1204 "[mapped with %s] [synced with %s]\n",
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001205 (unsigned long long)ref->dev_addr, entry->size,
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001206 dir2name[entry->direction],
Joerg Roedelaa010ef2009-06-12 15:25:06 +02001207 dir2name[ref->direction]);
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001208
Robin Murphy7f830642015-11-06 16:32:55 -08001209 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1210 ref->sg_call_ents != entry->sg_call_ents) {
Robin Murphyf737b092018-12-10 14:00:27 +00001211 err_printk(ref->dev, entry, "device driver syncs "
Robin Murphy7f830642015-11-06 16:32:55 -08001212 "DMA sg list with different entry count "
1213 "[map count=%d] [sync count=%d]\n",
1214 entry->sg_call_ents, ref->sg_call_ents);
1215 }
1216
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001217out:
1218 put_hash_bucket(bucket, &flags);
Joerg Roedel2d62ece2009-01-09 14:10:26 +01001219}

static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}
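
/*
 * Worked example for the boundary test above (editor's note): with
 * dma_get_seg_boundary() == 0xffff (a 64 KiB boundary), a segment
 * running from 0x1fff0 to 0x2000f gives start ^ end == 0x3ffff;
 * masking with ~0xffff leaves 0x30000, which is non-zero, so the
 * segment crosses a 64 KiB boundary and is reported.
 */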
1246
Stephen Boyd99c65fa2018-10-08 00:20:07 -07001247void debug_dma_map_single(struct device *dev, const void *addr,
1248 unsigned long len)
1249{
1250 if (unlikely(dma_debug_disabled()))
1251 return;
1252
1253 if (!virt_addr_valid(addr))
Robin Murphyf737b092018-12-10 14:00:27 +00001254 err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
Stephen Boyd99c65fa2018-10-08 00:20:07 -07001255 addr, len);
1256
1257 if (is_vmalloc_addr(addr))
Robin Murphyf737b092018-12-10 14:00:27 +00001258 err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
Stephen Boyd99c65fa2018-10-08 00:20:07 -07001259 addr, len);
1260}
1261EXPORT_SYMBOL(debug_dma_map_single);
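
/*
 * Editor's sketch (hypothetical driver code): only linear-mapped
 * memory such as a kmalloc() buffer passes the checks above; vmalloc
 * memory has no usable linear address and is reported:
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);	(OK)
 *
 *	vbuf = vmalloc(len);
 *	dma = dma_map_single(dev, vbuf, len, DMA_TO_DEVICE);	(reported)
 */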

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev = dev;
	entry->type = dma_debug_single;
	entry->pfn = page_to_pfn(page);
	entry->offset = offset;
	entry->dev_addr = dma_addr;
	entry->size = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
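
/*
 * Editor's illustration (hypothetical, and the exact call plumbing is
 * version-dependent): check_for_stack() above catches DMA to on-stack
 * buffers, a classic driver bug:
 *
 *	char buf[64];
 *	dma = dma_map_single(dev, buf, sizeof(buf), DMA_TO_DEVICE);
 *
 * buf lives on the task stack, so the mapping is reported.
 */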

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
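
/*
 * Editor's sketch of the intended calling pattern (hypothetical
 * driver code): every mapping should be followed by a
 * dma_mapping_error() check, which lands here and flips the entry to
 * MAP_ERR_CHECKED:
 *
 *	dma = dma_map_page(dev, page, 0, size, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */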

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_single,
		.dev = dev,
		.dev_addr = addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;
	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type = dma_debug_sg;
		entry->dev = dev;
		entry->pfn = page_to_pfn(sg_page(s));
		entry->offset = s->offset;
		entry->size = sg_dma_len(s);
		entry->dev_addr = sg_dma_address(s);
		entry->direction = direction;
		entry->sg_call_ents = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_page(s), s->offset);

		if (!PageHighMem(sg_page(s)))
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));

		check_sg_segment(dev, s);

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);
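
/*
 * Editor's note (hypothetical driver code): dma_map_sg() may coalesce
 * entries, so the mapped count it returns can be smaller than nents.
 * The unmap must still pass the original nents, which is why both
 * values are recorded above:
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */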

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = dir,
			.sg_call_ents = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_coherent;
	entry->dev = dev;
	entry->offset = offset_in_page(virt);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry);
}

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_coherent,
		.dev = dev,
		.offset = offset_in_page(virt),
		.dev_addr = addr,
		.size = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
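
/*
 * Editor's sketch (hypothetical driver code): coherent allocations
 * are tracked as DMA_BIDIRECTIONAL entries, and check_unmap() expects
 * the free to match the allocation's size and both addresses:
 *
 *	virt = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, virt, dma);
 */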

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_resource;
	entry->dev = dev;
	entry->pfn = PHYS_PFN(addr);
	entry->offset = offset_in_page(addr);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);
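
/*
 * Editor's illustration (hypothetical driver code): dma_map_resource()
 * takes a physical address rather than a kernel virtual address,
 * e.g. an MMIO FIFO handed to a DMA engine:
 *
 *	dma = dma_map_resource(dev, res->start, size, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -EIO;
 */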

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_resource,
		.dev = dev,
		.dev_addr = dma_addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
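
/*
 * Editor's note (hypothetical driver code): a sync may cover part of
 * a mapping, but check_sync() reports one that runs past the end of
 * the allocation:
 *
 *	dma = dma_map_single(dev, buf, 512, DMA_FROM_DEVICE);
 *	dma_sync_single_for_cpu(dev, dma, 64, DMA_FROM_DEVICE);	(OK)
 *	dma_sync_single_for_cpu(dev, dma, 1024, DMA_FROM_DEVICE);	(reported)
 */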

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);
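
/*
 * Editor's usage note (not code from this file): the filter parsed
 * above is typically given on the kernel command line, e.g.
 *
 *	dma_debug_driver=e1000e
 *
 * so that dma-debug error reports are restricted to one driver; it
 * can also be changed at runtime through debugfs
 * (dma-api/driver_filter).
 */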