dma-debug.c

/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE     1024ULL
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK  (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

enum map_err_types {
        MAP_ERR_CHECK_NOT_APPLICABLE,
        MAP_ERR_NOT_CHECKED,
        MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        unsigned long    pfn;
        size_t           offset;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
        enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static u32 global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
        return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;

/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN 64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
        [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
        [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
        [MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[4] = { "single", "page",
                                    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                pr_warning("Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

static bool driver_filter(struct device *dev)
{
        struct device_driver *drv;
        unsigned long flags;
        bool ret;

        /* driver filter off */
        if (likely(!current_driver_name[0]))
                return true;

        /* driver filter on and initialized */
        if (current_driver && dev && dev->driver == current_driver)
                return true;

        /* driver filter on, but we can't filter on a NULL device... */
        if (!dev)
                return false;

        if (current_driver || !current_driver_name[0])
                return false;

        /* driver filter on but not yet initialized */
        drv = dev->driver;
        if (!drv)
                return false;

        /* lock to protect against change of current_driver_name */
        read_lock_irqsave(&driver_name_lock, flags);

        ret = false;
        if (drv->name &&
            strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
                current_driver = drv;
                ret = true;
        }

        read_unlock_irqrestore(&driver_name_lock, flags);

        return ret;
}

#define err_printk(dev, entry, format, arg...) do {              \
                error_count += 1;                                \
                if (driver_filter(dev) &&                        \
                    (show_all_errors || show_num_errors > 0)) {  \
                        WARN(1, "%s %s: " format,                \
                             dev ? dev_driver_string(dev) : "NULL", \
                             dev ? dev_name(dev) : "NULL", ## arg); \
                        dump_entry_trace(entry);                 \
                }                                                \
                if (!show_all_errors && show_num_errors > 0)     \
                        show_num_errors -= 1;                    \
        } while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * We use bits 13-22 of the address (shifted by HASH_FN_SHIFT,
         * masked with HASH_FN_MASK) as the index into the hash.
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
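
/*
 * A quick worked example of the bucket index computation (the address is
 * chosen purely for illustration): with HASH_FN_SHIFT == 13 and
 * HASH_SIZE == 1024,
 *
 *     dev_addr = 0x01234567
 *     idx      = (0x01234567 >> 13) & 0x3ff = 0x11a   (bucket 282)
 *
 * Entries whose device addresses agree in bits 13-22 share a bucket,
 * which is why bucket_find_contain() below steps backwards through the
 * address space in units of 1 << HASH_FN_SHIFT when looking for a
 * mapping that contains the reference address.
 */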

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
        return ((a->dev_addr == b->dev_addr) &&
                (a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
                             struct dma_debug_entry *b)
{
        if (a->dev != b->dev)
                return false;

        if ((b->dev_addr <= a->dev_addr) &&
            ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
                return true;

        return false;
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
                                                  struct dma_debug_entry *ref,
                                                  match_fn match)
{
        struct dma_debug_entry *entry, *ret = NULL;
        int matches = 0, match_lvl, last_lvl = -1;

        list_for_each_entry(entry, &bucket->list, list) {
                if (!match(ref, entry))
                        continue;

                /*
                 * Some drivers map the same physical address multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which returns the entry from
                 * the hash which fits best to the reference value
                 * instead of the first-fit.
                 */
                matches += 1;
                match_lvl = 0;
                entry->size         == ref->size         ? ++match_lvl : 0;
                entry->type         == ref->type         ? ++match_lvl : 0;
                entry->direction    == ref->direction    ? ++match_lvl : 0;
                entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

                if (match_lvl == 4) {
                        /* perfect-fit - return the result */
                        return entry;
                } else if (match_lvl > last_lvl) {
                        /*
                         * We found an entry that fits better than the
                         * previous one or it is the 1st match.
                         */
                        last_lvl = match_lvl;
                        ret      = entry;
                }
        }

        /*
         * If we have multiple matches but no perfect-fit, just return
         * NULL.
         */
        ret = (matches == 1) ? ret : NULL;

        return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
                                                 struct dma_debug_entry *ref)
{
        return __hash_bucket_find(bucket, ref, exact_match);
}

static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
                                                   struct dma_debug_entry *ref,
                                                   unsigned long *flags)
{
        unsigned int max_range = dma_get_max_seg_size(ref->dev);
        struct dma_debug_entry *entry, index = *ref;
        unsigned int range = 0;

        while (range <= max_range) {
                entry = __hash_bucket_find(*bucket, ref, containing_match);

                if (entry)
                        return entry;

                /*
                 * Nothing found, go back a hash bucket
                 */
                put_hash_bucket(*bucket, flags);
                range          += (1 << HASH_FN_SHIFT);
                index.dev_addr -= (1 << HASH_FN_SHIFT);
                *bucket = get_hash_bucket(&index, flags);
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
        return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
                                         type2name[entry->type], idx,
                                         phys_addr(entry), entry->pfn,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction],
                                         maperr2str[entry->map_err_type]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
        return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
                (entry->offset >> L1_CACHE_SHIFT);
}
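
/*
 * Worked example (a sketch; the numbers assume PAGE_SHIFT == 12 and
 * L1_CACHE_SHIFT == 6, i.e. 4K pages with 64-byte cachelines, so
 * CACHELINES_PER_PAGE == 64):
 *
 *     pfn = 0x1234, offset = 0x80
 *     cln = (0x1234 << 6) + (0x80 >> 6) = 0x48d00 + 2 = 0x48d02
 *
 * Each page therefore owns a contiguous run of 64 cacheline numbers,
 * which is what lets debug_dma_assert_idle() below scan an entire page
 * with a single radix-tree gang lookup.
 */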

static int active_cacheline_read_overlap(phys_addr_t cln)
{
        int overlap = 0, i;

        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
                        overlap |= 1 << i;
        return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
        int i;

        if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
                return overlap;

        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (overlap & 1 << i)
                        radix_tree_tag_set(&dma_active_cacheline, cln, i);
                else
                        radix_tree_tag_clear(&dma_active_cacheline, cln, i);

        return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
        int overlap = active_cacheline_read_overlap(cln);

        overlap = active_cacheline_set_overlap(cln, ++overlap);

        /* If we overflowed the overlap counter then we're potentially
         * leaking dma-mappings. Otherwise, if maps and unmaps are
         * balanced then this overflow may cause false negatives in
         * debug_dma_assert_idle() as the cacheline may be marked idle
         * prematurely.
         */
        WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
                  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
                  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
        int overlap = active_cacheline_read_overlap(cln);

        return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
        phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;
        int rc;

        /* If the device is not writing memory then we don't have any
         * concerns about the cpu consuming stale data. This mitigates
         * legitimate usages of overlapping mappings.
         */
        if (entry->direction == DMA_TO_DEVICE)
                return 0;

        spin_lock_irqsave(&radix_lock, flags);
        rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
        if (rc == -EEXIST)
                active_cacheline_inc_overlap(cln);
        spin_unlock_irqrestore(&radix_lock, flags);

        return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
        phys_addr_t cln = to_cacheline_number(entry);
        unsigned long flags;

        /* ...mirror the insert case */
        if (entry->direction == DMA_TO_DEVICE)
                return;

        spin_lock_irqsave(&radix_lock, flags);
        /* since we are counting overlaps the final put of the
         * cacheline will occur when the overlap count is 0.
         * active_cacheline_dec_overlap() returns -1 in that case
         */
        if (active_cacheline_dec_overlap(cln) < 0)
                radix_tree_delete(&dma_active_cacheline, cln);
        spin_unlock_irqrestore(&radix_lock, flags);
}

/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
        static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
        struct dma_debug_entry *entry = NULL;
        void **results = (void **) &ents;
        unsigned int nents, i;
        unsigned long flags;
        phys_addr_t cln;

        if (dma_debug_disabled())
                return;

        if (!page)
                return;

        cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
        spin_lock_irqsave(&radix_lock, flags);
        nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
                                       CACHELINES_PER_PAGE);
        for (i = 0; i < nents; i++) {
                phys_addr_t ent_cln = to_cacheline_number(ents[i]);

                if (ent_cln == cln) {
                        entry = ents[i];
                        break;
                } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
                        break;
        }
        spin_unlock_irqrestore(&radix_lock, flags);

        if (!entry)
                return;

        cln = to_cacheline_number(entry);
        err_printk(entry->dev, entry,
                   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
                   &cln);
}
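
/*
 * Usage sketch: the memory-management code is the intended caller of
 * debug_dma_assert_idle(). As an assumed example, a copy-on-write fault
 * handler would assert that the page it is about to copy from is not
 * still the target of an in-flight DMA:
 *
 *     debug_dma_assert_idle(old_page);
 *     copy_user_highpage(new_page, old_page, address, vma);
 *
 * If any cacheline of old_page is still present in dma_active_cacheline,
 * the err_printk() above fires and names the device owning the mapping.
 */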

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;
        int rc;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);

        rc = active_cacheline_insert(entry);
        if (rc == -ENOMEM) {
                pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        }

        /* TODO: report -EEXIST errors here as overlapping mappings are
         * not supported by the DMA API
         */
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

        return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                pr_err("DMA-API: debugging out of memory - disabling\n");
                global_disable = true;
                spin_unlock_irqrestore(&free_entries_lock, flags);
                return NULL;
        }

        entry = __dma_entry_alloc();

        spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        active_cacheline_remove(entry);

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
        int i, delta, ret = 0;
        unsigned long flags;
        struct dma_debug_entry *entry;
        LIST_HEAD(tmp);

        spin_lock_irqsave(&free_entries_lock, flags);

        if (nr_total_entries < num_entries) {
                delta = num_entries - nr_total_entries;

                spin_unlock_irqrestore(&free_entries_lock, flags);

                for (i = 0; i < delta; i++) {
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                break;

                        list_add_tail(&entry->list, &tmp);
                }

                spin_lock_irqsave(&free_entries_lock, flags);

                list_splice(&tmp, &free_entries);
                nr_total_entries += i;
                num_free_entries += i;
        } else {
                delta = nr_total_entries - num_entries;

                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                        entry = __dma_entry_alloc();
                        kfree(entry);
                }

                nr_total_entries -= i;
        }

        if (nr_total_entries != num_entries)
                ret = 1;

        spin_unlock_irqrestore(&free_entries_lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static ssize_t filter_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN + 1];
        unsigned long flags;
        int len;

        if (!current_driver_name[0])
                return 0;

        /*
         * We can't copy to userspace directly because current_driver_name can
         * only be read under the driver_name_lock with irqs disabled. So
         * create a temporary copy first.
         */
        read_lock_irqsave(&driver_name_lock, flags);
        len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
        read_unlock_irqrestore(&driver_name_lock, flags);

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
                            size_t count, loff_t *ppos)
{
        char buf[NAME_MAX_LEN];
        unsigned long flags;
        size_t len;
        int i;

        /*
         * We can't copy from userspace directly. Access to
         * current_driver_name is protected with a write_lock with irqs
         * disabled. Since copy_from_user can fault and may sleep we
         * need to copy to a temporary buffer first.
         */
        len = min(count, (size_t)(NAME_MAX_LEN - 1));
        if (copy_from_user(buf, userbuf, len))
                return -EFAULT;

        buf[len] = 0;

        write_lock_irqsave(&driver_name_lock, flags);

        /*
         * Now handle the string we got from userspace very carefully.
         * The rules are:
         *   - only use the first token we got
         *   - token delimiter is everything looking like a space
         *     character (' ', '\n', '\t' ...)
         */
        if (!isalnum(buf[0])) {
                /*
                 * If the first character userspace gave us is not
                 * alphanumerical then assume the filter should be
                 * switched off.
                 */
                if (current_driver_name[0])
                        pr_info("DMA-API: switching off dma-debug driver filter\n");
                current_driver_name[0] = 0;
                current_driver = NULL;
                goto out_unlock;
        }

        /*
         * Now parse out the first token and use it as the name for the
         * driver to filter for.
         */
        for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
                current_driver_name[i] = buf[i];
                if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
                        break;
        }
        current_driver_name[i] = 0;
        current_driver = NULL;

        pr_info("DMA-API: enable driver filter for driver [%s]\n",
                current_driver_name);

out_unlock:
        write_unlock_irqrestore(&driver_name_lock, flags);

        return count;
}

static const struct file_operations filter_fops = {
        .read   = filter_read,
        .write  = filter_write,
        .llseek = default_llseek,
};
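
/*
 * Usage sketch from userspace (assumes debugfs is mounted at the usual
 * /sys/kernel/debug; "e1000e" stands in for any bound driver name):
 *
 *     # limit dma-debug error reports to a single driver
 *     echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *
 *     # switch the filter off again (first character not alphanumeric)
 *     echo "" > /sys/kernel/debug/dma-api/driver_filter
 *
 * filter_read() echoes the currently active filter back, so reading the
 * file confirms the setting.
 */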

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                pr_err("DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                                                  dma_debug_dent,
                                                  &global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                                              dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        filter_dent = debugfs_create_file("driver_filter", 0644,
                                          dma_debug_dent, NULL, &filter_fops);
        if (!filter_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
        struct dma_debug_entry *entry;
        unsigned long flags;
        int count = 0, i;

        local_irq_save(flags);

        for (i = 0; i < HASH_SIZE; ++i) {
                spin_lock(&dma_entry_hash[i].lock);
                list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
                        if (entry->dev == dev) {
                                count += 1;
                                *out_entry = entry;
                        }
                }
                spin_unlock(&dma_entry_hash[i].lock);
        }

        local_irq_restore(flags);

        return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
        struct device *dev = data;
        struct dma_debug_entry *uninitialized_var(entry);
        int count;

        if (dma_debug_disabled())
                return 0;

        switch (action) {
        case BUS_NOTIFY_UNBOUND_DRIVER:
                count = device_dma_allocations(dev, &entry);
                if (count == 0)
                        break;
                err_printk(dev, entry, "DMA-API: device driver has pending "
                           "DMA allocations while released from device "
                           "[count=%d]\n"
                           "One of leaked entries details: "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [mapped as %s]\n",
                           count, entry->dev_addr, entry->size,
                           dir2name[entry->direction], type2name[entry->type]);
                break;
        default:
                break;
        }

        return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
        struct notifier_block *nb;

        if (dma_debug_disabled())
                return;

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (nb == NULL) {
                pr_err("dma_debug_add_bus: out of memory\n");
                return;
        }

        nb->notifier_call = dma_debug_device_change;

        bus_register_notifier(bus, nb);
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        /* Do not use dma_debug_initialized here, since we really want to be
         * called to set dma_debug_initialized
         */
        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                pr_err("DMA-API: error creating debugfs entries - disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                pr_err("DMA-API: debugging out of memory error - disabled\n");
                global_disable = true;

                return;
        }

        nr_total_entries = num_free_entries;

        dma_debug_initialized = true;

        pr_info("DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                pr_info("DMA-API: debugging disabled on kernel command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);
        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
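
/*
 * Command-line usage (both parameters are documented in
 * Documentation/DMA-API.txt):
 *
 *     dma_debug=off            disable the facility at boot
 *     dma_debug_entries=65536  preallocate a different number of
 *                              dma_debug_entry structs than the
 *                              architecture default
 */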

static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);
        entry = bucket_find_exact(bucket, ref);

        if (!entry) {
                /* must drop lock before calling dma_mapping_error */
                put_hash_bucket(bucket, &flags);

                if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free an "
                                   "invalid DMA memory address\n");
                } else {
                        err_printk(ref->dev, NULL,
                                   "DMA-API: device driver tries to free DMA "
                                   "memory it has not allocated [device "
                                   "address=0x%016llx] [size=%llu bytes]\n",
                                   ref->dev_addr, ref->size);
                }
                return;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (phys_addr(ref) != phys_addr(entry))) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=0x%016llx] "
                           "[cpu free address=0x%016llx]",
                           ref->dev_addr, ref->size,
                           phys_addr(entry),
                           phys_addr(ref));
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                err_printk(ref->dev, entry,
                           "DMA-API: device driver failed to check map error "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s]",
                           ref->dev_addr, ref->size,
                           type2name[entry->type]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

        put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, NULL, "DMA-API: device driver maps memory from "
                           "stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
        unsigned long a1 = (unsigned long)addr;
        unsigned long b1 = a1 + len;
        unsigned long a2 = (unsigned long)start;
        unsigned long b2 = (unsigned long)end;

        return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
        if (overlap(addr, len, _text, _etext) ||
            overlap(addr, len, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}

static void check_sync(struct device *dev,
                       struct dma_debug_entry *ref,
                       bool to_cpu)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(ref, &flags);

        entry = bucket_find_contain(&bucket, ref, &flags);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           (unsigned long long)ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                           " DMA memory outside allocated range "
                           "[device address=0x%016llx] "
                           "[allocation size=%llu bytes] "
                           "[sync offset+size=%llu]\n",
                           entry->dev_addr, entry->size,
                           ref->size);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (ref->direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
            !(ref->direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device read-only DMA memory for cpu "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
            !(ref->direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device write-only DMA memory to device "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)ref->dev_addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);

out:
        put_hash_bucket(bucket, &flags);
}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(dma_debug_disabled()))
                return;

        if (dma_mapping_error(dev, dma_addr))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev          = dev;
        entry->type         = dma_debug_page;
        entry->pfn          = page_to_pfn(page);
        entry->offset       = offset;
        entry->dev_addr     = dma_addr;
        entry->size         = size;
        entry->direction    = direction;
        entry->map_err_type = MAP_ERR_NOT_CHECKED;

        if (map_single)
                entry->type = dma_debug_single;

        if (!PageHighMem(page)) {
                void *addr = page_address(page) + offset;

                check_for_stack(dev, addr);
                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
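
/*
 * Driver-side sketch of the pattern these hooks check (a hypothetical
 * driver; the calls are the standard streaming DMA API):
 *
 *     dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 *                                   DMA_FROM_DEVICE);
 *     if (dma_mapping_error(dev, dma))    // clears MAP_ERR_NOT_CHECKED
 *             return -ENOMEM;
 *     ...
 *     dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 *
 * Skipping the dma_mapping_error() check, or unmapping with a different
 * size or direction, triggers the corresponding err_printk() in
 * check_unmap() above.
 */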

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_debug_entry ref;
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.dev = dev;
        ref.dev_addr = dma_addr;
        bucket = get_hash_bucket(&ref, &flags);

        list_for_each_entry(entry, &bucket->list, list) {
                if (!exact_match(&ref, entry))
                        continue;

                /*
                 * The same physical address can be mapped multiple
                 * times. Without a hardware IOMMU this results in the
                 * same device addresses being put into the dma-debug
                 * hash multiple times too. This can result in false
                 * positives being reported. Therefore we implement a
                 * best-fit algorithm here which updates the first entry
                 * from the hash which fits the reference value and is
                 * not currently listed as being checked.
                 */
                if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
                        entry->map_err_type = MAP_ERR_CHECKED;
                        break;
                }
        }

        put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_page,
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };

        if (unlikely(dma_debug_disabled()))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->pfn            = page_to_pfn(sg_page(s));
                entry->offset         = s->offset;
                entry->size           = sg_dma_len(s);
                entry->dev_addr       = sg_dma_address(s);
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                if (!PageHighMem(sg_page(s))) {
                        check_for_stack(dev, sg_virt(s));
                        check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
                }

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);
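
/*
 * Scatterlist usage sketch (hypothetical buffer setup; standard DMA API
 * calls): the unmap must pass the original nents, while the number of
 * entries actually consumed is the dma_map_sg() return value, tracked
 * above as sg_mapped_ents:
 *
 *     int mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *     if (!mapped)
 *             return -ENOMEM;
 *     ...
 *     dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);  // nents, not mapped
 *
 * check_unmap() compares sg_call_ents against the map-time value and
 * warns when the two counts differ.
 */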

static int get_nr_mapped_entries(struct device *dev,
                                 struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;
        int mapped_ents;

        bucket      = get_hash_bucket(ref, &flags);
        entry       = bucket_find_exact(bucket, ref);
        mapped_ents = 0;

        if (entry)
                mapped_ents = entry->sg_mapped_ents;
        put_hash_bucket(bucket, &flags);

        return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sglist, s, nelems, i) {
                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .pfn            = page_to_pfn(sg_page(s)),
                        .offset         = s->offset,
                        .dev_addr       = sg_dma_address(s),
                        .size           = sg_dma_len(s),
                        .direction      = dir,
                        .sg_call_ents   = nelems,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(dma_debug_disabled()))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->pfn       = page_to_pfn(virt_to_page(virt));
        /* offset within the page; '& PAGE_MASK' would yield the
         * page-aligned part of the address instead */
        entry->offset    = (size_t) virt & ~PAGE_MASK;
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_coherent,
                .dev            = dev,
                .pfn            = page_to_pfn(virt_to_page(virt)),
                .offset         = (size_t) virt & ~PAGE_MASK,
                .dev_addr       = addr,
                .size           = size,
                .direction      = DMA_BIDIRECTIONAL,
        };

        if (unlikely(dma_debug_disabled()))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = offset + size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        struct dma_debug_entry ref;

        if (unlikely(dma_debug_disabled()))
                return;

        ref.type         = dma_debug_single;
        ref.dev          = dev;
        ref.dev_addr     = dma_handle;
        ref.size         = offset + size;
        ref.direction    = direction;
        ref.sg_call_ents = 0;

        check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, nelems, i) {
                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .pfn            = page_to_pfn(sg_page(s)),
                        .offset         = s->offset,
                        .dev_addr       = sg_dma_address(s),
                        .size           = sg_dma_len(s),
                        .direction      = direction,
                        .sg_call_ents   = nelems,
                };

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                if (i >= mapped_ents)
                        break;

                check_sync(dev, &ref, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int mapped_ents = 0, i;

        if (unlikely(dma_debug_disabled()))
                return;

        for_each_sg(sg, s, nelems, i) {
                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .pfn            = page_to_pfn(sg_page(s)),
                        .offset         = s->offset,
                        .dev_addr       = sg_dma_address(s),
                        .size           = sg_dma_len(s),
                        .direction      = direction,
                        .sg_call_ents   = nelems,
                };

                if (!i)
                        mapped_ents = get_nr_mapped_entries(dev, &ref);

                if (i >= mapped_ents)
                        break;

                check_sync(dev, &ref, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

static int __init dma_debug_driver_setup(char *str)
{
        int i;

        for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
                current_driver_name[i] = *str;
                if (*str == 0)
                        break;
        }

        if (current_driver_name[0])
                pr_info("DMA-API: enable driver filter for driver [%s]\n",
                        current_driver_name);

        return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);