dma-debug.c

  1. /*
  2. * Copyright (C) 2008 Advanced Micro Devices, Inc.
  3. *
  4. * Author: Joerg Roedel <joerg.roedel@amd.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published
  8. * by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/scatterlist.h>
  20. #include <linux/dma-mapping.h>
  21. #include <linux/stacktrace.h>
  22. #include <linux/dma-debug.h>
  23. #include <linux/spinlock.h>
  24. #include <linux/debugfs.h>
  25. #include <linux/uaccess.h>
  26. #include <linux/export.h>
  27. #include <linux/device.h>
  28. #include <linux/types.h>
  29. #include <linux/sched.h>
  30. #include <linux/ctype.h>
  31. #include <linux/list.h>
  32. #include <linux/slab.h>
  33. #include <asm/sections.h>
  34. #define HASH_SIZE 1024ULL
  35. #define HASH_FN_SHIFT 13
  36. #define HASH_FN_MASK (HASH_SIZE - 1)
  37. enum {
  38. dma_debug_single,
  39. dma_debug_page,
  40. dma_debug_sg,
  41. dma_debug_coherent,
  42. };
  43. enum map_err_types {
  44. MAP_ERR_CHECK_NOT_APPLICABLE,
  45. MAP_ERR_NOT_CHECKED,
  46. MAP_ERR_CHECKED,
  47. };
  48. #define DMA_DEBUG_STACKTRACE_ENTRIES 5
  49. /**
  50. * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
  51. * @list: node on pre-allocated free_entries list
  52. * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
  53. * @type: single, page, sg, coherent
  54. * @pfn: page frame of the start address
  55. * @offset: offset of mapping relative to pfn
  56. * @size: length of the mapping
  57. * @direction: enum dma_data_direction
  58. * @sg_call_ents: 'nents' from dma_map_sg
  59. * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
  60. * @map_err_type: track whether dma_mapping_error() was checked
  61. * @stacktrace: support backtraces when a violation is detected
  62. */
  63. struct dma_debug_entry {
  64. struct list_head list;
  65. struct device *dev;
  66. int type;
  67. unsigned long pfn;
  68. size_t offset;
  69. u64 dev_addr;
  70. u64 size;
  71. int direction;
  72. int sg_call_ents;
  73. int sg_mapped_ents;
  74. enum map_err_types map_err_type;
  75. #ifdef CONFIG_STACKTRACE
  76. struct stack_trace stacktrace;
  77. unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
  78. #endif
  79. };
  80. typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
  81. struct hash_bucket {
  82. struct list_head list;
  83. spinlock_t lock;
  84. } ____cacheline_aligned_in_smp;
  85. /* Hash list to save the allocated dma addresses */
  86. static struct hash_bucket dma_entry_hash[HASH_SIZE];
  87. /* List of pre-allocated dma_debug_entry's */
  88. static LIST_HEAD(free_entries);
  89. /* Lock for the list above */
  90. static DEFINE_SPINLOCK(free_entries_lock);
  91. /* Global disable flag - will be set in case of an error */
  92. static u32 global_disable __read_mostly;
  93. /* Global error count */
  94. static u32 error_count;
  96. /* Global error show enable */
  96. static u32 show_all_errors __read_mostly;
  97. /* Number of errors to show */
  98. static u32 show_num_errors = 1;
  99. static u32 num_free_entries;
  100. static u32 min_free_entries;
  101. static u32 nr_total_entries;
  102. /* number of preallocated entries requested by kernel cmdline */
  103. static u32 req_entries;
  104. /* debugfs dentry's for the stuff above */
  105. static struct dentry *dma_debug_dent __read_mostly;
  106. static struct dentry *global_disable_dent __read_mostly;
  107. static struct dentry *error_count_dent __read_mostly;
  108. static struct dentry *show_all_errors_dent __read_mostly;
  109. static struct dentry *show_num_errors_dent __read_mostly;
  110. static struct dentry *num_free_entries_dent __read_mostly;
  111. static struct dentry *min_free_entries_dent __read_mostly;
  112. static struct dentry *filter_dent __read_mostly;
  113. /* per-driver filter related state */
  114. #define NAME_MAX_LEN 64
  115. static char current_driver_name[NAME_MAX_LEN] __read_mostly;
  116. static struct device_driver *current_driver __read_mostly;
  117. static DEFINE_RWLOCK(driver_name_lock);
  118. static const char *const maperr2str[] = {
  119. [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
  120. [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
  121. [MAP_ERR_CHECKED] = "dma map error checked",
  122. };
  123. static const char *type2name[4] = { "single", "page",
  124. "scatter-gather", "coherent" };
  125. static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
  126. "DMA_FROM_DEVICE", "DMA_NONE" };
  127. /*
  128. * The access to some variables in this macro is racy. We can't use atomic_t
  129. * here because all these variables are exported to debugfs. Some of them are
  130. * even writable. This is also the reason why a lock won't help much. But anyway,
  131. * the races are no big deal. Here is why:
  132. *
  133. * error_count: the addition is racy, but the worst thing that can happen is
  134. * that we don't count some errors
  135. * show_num_errors: the subtraction is racy. Also no big deal because in
  136. * worst case this will result in one warning more in the
  137. * system log than the user configured. This variable is
  138. * writeable via debugfs.
  139. */
  140. static inline void dump_entry_trace(struct dma_debug_entry *entry)
  141. {
  142. #ifdef CONFIG_STACKTRACE
  143. if (entry) {
  144. pr_warning("Mapped at:\n");
  145. print_stack_trace(&entry->stacktrace, 0);
  146. }
  147. #endif
  148. }
  149. static bool driver_filter(struct device *dev)
  150. {
  151. struct device_driver *drv;
  152. unsigned long flags;
  153. bool ret;
  154. /* driver filter off */
  155. if (likely(!current_driver_name[0]))
  156. return true;
  157. /* driver filter on and initialized */
  158. if (current_driver && dev && dev->driver == current_driver)
  159. return true;
  160. /* driver filter on, but we can't filter on a NULL device... */
  161. if (!dev)
  162. return false;
  163. if (current_driver || !current_driver_name[0])
  164. return false;
  165. /* driver filter on but not yet initialized */
  166. drv = dev->driver;
  167. if (!drv)
  168. return false;
  169. /* lock to protect against change of current_driver_name */
  170. read_lock_irqsave(&driver_name_lock, flags);
  171. ret = false;
  172. if (drv->name &&
  173. strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
  174. current_driver = drv;
  175. ret = true;
  176. }
  177. read_unlock_irqrestore(&driver_name_lock, flags);
  178. return ret;
  179. }
  180. #define err_printk(dev, entry, format, arg...) do { \
  181. error_count += 1; \
  182. if (driver_filter(dev) && \
  183. (show_all_errors || show_num_errors > 0)) { \
  184. WARN(1, "%s %s: " format, \
  185. dev ? dev_driver_string(dev) : "NULL", \
  186. dev ? dev_name(dev) : "NULL", ## arg); \
  187. dump_entry_trace(entry); \
  188. } \
  189. if (!show_all_errors && show_num_errors > 0) \
  190. show_num_errors -= 1; \
  191. } while (0)
  192. /*
  193. * Hash related functions
  194. *
  195. * Every DMA-API request is saved into a struct dma_debug_entry. To
  196. * have quick access to these structs they are stored into a hash.
  197. */
  198. static int hash_fn(struct dma_debug_entry *entry)
  199. {
  200. /*
  201. * Hash function is based on the dma address.
  202. * We use bits 13-22 here as the index into the hash
  203. */
  204. return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
  205. }
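/*
 * For illustration: with HASH_FN_SHIFT == 13 and HASH_FN_MASK == 0x3ff,
 * a dev_addr of 0x12345000 hashes to (0x12345000 >> 13) & 0x3ff = 0x1a2.
 * Addresses within the same 8 KiB (1 << HASH_FN_SHIFT) window share a
 * bucket, which is what bucket_find_contain() below relies on when it
 * walks backwards one bucket at a time.
 */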
  206. /*
  207. * Request exclusive access to a hash bucket for a given dma_debug_entry.
  208. */
  209. static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
  210. unsigned long *flags)
  211. {
  212. int idx = hash_fn(entry);
  213. unsigned long __flags;
  214. spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
  215. *flags = __flags;
  216. return &dma_entry_hash[idx];
  217. }
  218. /*
  219. * Give up exclusive access to the hash bucket
  220. */
  221. static void put_hash_bucket(struct hash_bucket *bucket,
  222. unsigned long *flags)
  223. {
  224. unsigned long __flags = *flags;
  225. spin_unlock_irqrestore(&bucket->lock, __flags);
  226. }
  227. static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
  228. {
  229. return ((a->dev_addr == b->dev_addr) &&
  230. (a->dev == b->dev)) ? true : false;
  231. }
  232. static bool containing_match(struct dma_debug_entry *a,
  233. struct dma_debug_entry *b)
  234. {
  235. if (a->dev != b->dev)
  236. return false;
  237. if ((b->dev_addr <= a->dev_addr) &&
  238. ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
  239. return true;
  240. return false;
  241. }
  242. /*
  243. * Search a given entry in the hash bucket list
  244. */
  245. static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
  246. struct dma_debug_entry *ref,
  247. match_fn match)
  248. {
  249. struct dma_debug_entry *entry, *ret = NULL;
  250. int matches = 0, match_lvl, last_lvl = -1;
  251. list_for_each_entry(entry, &bucket->list, list) {
  252. if (!match(ref, entry))
  253. continue;
  254. /*
  255. * Some drivers map the same physical address multiple
  256. * times. Without a hardware IOMMU this results in the
  257. * same device addresses being put into the dma-debug
  258. * hash multiple times too. This can result in false
  259. * positives being reported. Therefore we implement a
  260. * best-fit algorithm here which returns the entry from
  261. * the hash which fits best to the reference value
  262. * instead of the first-fit.
  263. */
  264. matches += 1;
  265. match_lvl = 0;
  266. entry->size == ref->size ? ++match_lvl : 0;
  267. entry->type == ref->type ? ++match_lvl : 0;
  268. entry->direction == ref->direction ? ++match_lvl : 0;
  269. entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
  270. if (match_lvl == 4) {
  271. /* perfect-fit - return the result */
  272. return entry;
  273. } else if (match_lvl > last_lvl) {
  274. /*
  275. * We found an entry that fits better than the
  276. * previous one or it is the 1st match.
  277. */
  278. last_lvl = match_lvl;
  279. ret = entry;
  280. }
  281. }
  282. /*
  283. * If we have multiple matches but no perfect-fit, just return
  284. * NULL.
  285. */
  286. ret = (matches == 1) ? ret : NULL;
  287. return ret;
  288. }
  289. static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
  290. struct dma_debug_entry *ref)
  291. {
  292. return __hash_bucket_find(bucket, ref, exact_match);
  293. }
  294. static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
  295. struct dma_debug_entry *ref,
  296. unsigned long *flags)
  297. {
  298. unsigned int max_range = dma_get_max_seg_size(ref->dev);
  299. struct dma_debug_entry *entry, index = *ref;
  300. unsigned int range = 0;
  301. while (range <= max_range) {
  302. entry = __hash_bucket_find(*bucket, &index, containing_match);
  303. if (entry)
  304. return entry;
  305. /*
  306. * Nothing found, go back a hash bucket
  307. */
  308. put_hash_bucket(*bucket, flags);
  309. range += (1 << HASH_FN_SHIFT);
  310. index.dev_addr -= (1 << HASH_FN_SHIFT);
  311. *bucket = get_hash_bucket(&index, flags);
  312. }
  313. return NULL;
  314. }
  315. /*
  316. * Add an entry to a hash bucket
  317. */
  318. static void hash_bucket_add(struct hash_bucket *bucket,
  319. struct dma_debug_entry *entry)
  320. {
  321. list_add_tail(&entry->list, &bucket->list);
  322. }
  323. /*
  324. * Remove entry from a hash bucket list
  325. */
  326. static void hash_bucket_del(struct dma_debug_entry *entry)
  327. {
  328. list_del(&entry->list);
  329. }
  330. static unsigned long long phys_addr(struct dma_debug_entry *entry)
  331. {
  332. return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
  333. }
  334. /*
  335. * Dump mapping entries for debugging purposes
  336. */
  337. void debug_dma_dump_mappings(struct device *dev)
  338. {
  339. int idx;
  340. for (idx = 0; idx < HASH_SIZE; idx++) {
  341. struct hash_bucket *bucket = &dma_entry_hash[idx];
  342. struct dma_debug_entry *entry;
  343. unsigned long flags;
  344. spin_lock_irqsave(&bucket->lock, flags);
  345. list_for_each_entry(entry, &bucket->list, list) {
  346. if (!dev || dev == entry->dev) {
  347. dev_info(entry->dev,
  348. "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
  349. type2name[entry->type], idx,
  350. phys_addr(entry), entry->pfn,
  351. entry->dev_addr, entry->size,
  352. dir2name[entry->direction],
  353. maperr2str[entry->map_err_type]);
  354. }
  355. }
  356. spin_unlock_irqrestore(&bucket->lock, flags);
  357. }
  358. }
  359. EXPORT_SYMBOL(debug_dma_dump_mappings);
  360. /*
  361. * For each page mapped (initial page in the case of
  362. * dma_alloc_coherent/dma_map_{single|page}, or each page in a
  363. * scatterlist) insert into this tree using the pfn as the key. At
  364. * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
  365. * the pfn already exists at insertion time add a tag as a reference
  366. * count for the overlapping mappings. For now, the overlap tracking
  367. * just ensures that 'unmaps' balance 'maps' before marking the pfn
  368. * idle, but we should also be flagging overlaps as an API violation.
  369. *
  370. * Memory usage is mostly constrained by the maximum number of available
  371. * dma-debug entries in that we need a free dma_debug_entry before
  372. * inserting into the tree. In the case of dma_map_{single|page} and
  373. * dma_alloc_coherent there is only one dma_debug_entry and one pfn to
  374. * track per event. dma_map_sg(), on the other hand,
  375. * consumes one dma_debug_entry per mapped segment and inserts the
  376. * corresponding pfns into the tree.
  377. *
  378. * At any time debug_dma_assert_idle() can be called to trigger a
  379. * warning if the given page is in the active set.
  380. */
  381. static RADIX_TREE(dma_active_pfn, GFP_NOWAIT);
  382. static DEFINE_SPINLOCK(radix_lock);
  383. #define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
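/*
 * For illustration: assuming RADIX_TREE_MAX_TAGS == 3 (its usual value),
 * the per-pfn overlap count encoded in the tags saturates at
 * ACTIVE_PFN_MAX_OVERLAP == 7; beyond that active_pfn_inc_overlap() can
 * only warn about the potential leak.
 */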
  384. static int active_pfn_read_overlap(unsigned long pfn)
  385. {
  386. int overlap = 0, i;
  387. for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
  388. if (radix_tree_tag_get(&dma_active_pfn, pfn, i))
  389. overlap |= 1 << i;
  390. return overlap;
  391. }
  392. static int active_pfn_set_overlap(unsigned long pfn, int overlap)
  393. {
  394. int i;
  395. if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0)
  396. return overlap;
  397. for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
  398. if (overlap & 1 << i)
  399. radix_tree_tag_set(&dma_active_pfn, pfn, i);
  400. else
  401. radix_tree_tag_clear(&dma_active_pfn, pfn, i);
  402. return overlap;
  403. }
  404. static void active_pfn_inc_overlap(unsigned long pfn)
  405. {
  406. int overlap = active_pfn_read_overlap(pfn);
  407. overlap = active_pfn_set_overlap(pfn, ++overlap);
  408. /* If we overflowed the overlap counter then we're potentially
  409. * leaking dma-mappings. Otherwise, if maps and unmaps are
  410. * balanced then this overflow may cause false negatives in
  411. * debug_dma_assert_idle() as the pfn may be marked idle
  412. * prematurely.
  413. */
  414. WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP,
  415. "DMA-API: exceeded %d overlapping mappings of pfn %lx\n",
  416. ACTIVE_PFN_MAX_OVERLAP, pfn);
  417. }
  418. static int active_pfn_dec_overlap(unsigned long pfn)
  419. {
  420. int overlap = active_pfn_read_overlap(pfn);
  421. return active_pfn_set_overlap(pfn, --overlap);
  422. }
  423. static int active_pfn_insert(struct dma_debug_entry *entry)
  424. {
  425. unsigned long flags;
  426. int rc;
  427. spin_lock_irqsave(&radix_lock, flags);
  428. rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry);
  429. if (rc == -EEXIST)
  430. active_pfn_inc_overlap(entry->pfn);
  431. spin_unlock_irqrestore(&radix_lock, flags);
  432. return rc;
  433. }
  434. static void active_pfn_remove(struct dma_debug_entry *entry)
  435. {
  436. unsigned long flags;
  437. spin_lock_irqsave(&radix_lock, flags);
  438. /* since we are counting overlaps the final put of the
  439. * entry->pfn will occur when the overlap count is 0.
  440. * active_pfn_dec_overlap() returns -1 in that case
  441. */
  442. if (active_pfn_dec_overlap(entry->pfn) < 0)
  443. radix_tree_delete(&dma_active_pfn, entry->pfn);
  444. spin_unlock_irqrestore(&radix_lock, flags);
  445. }
  446. /**
  447. * debug_dma_assert_idle() - assert that a page is not undergoing dma
  448. * @page: page to lookup in the dma_active_pfn tree
  449. *
  450. * Place a call to this routine in cases where the cpu touching the page
  451. * before the dma completes (page is dma_unmapped) will lead to data
  452. * corruption.
  453. */
  454. void debug_dma_assert_idle(struct page *page)
  455. {
  456. unsigned long flags;
  457. struct dma_debug_entry *entry;
  458. if (!page)
  459. return;
  460. spin_lock_irqsave(&radix_lock, flags);
  461. entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page));
  462. spin_unlock_irqrestore(&radix_lock, flags);
  463. if (!entry)
  464. return;
  465. err_printk(entry->dev, entry,
  466. "DMA-API: cpu touching an active dma mapped page "
  467. "[pfn=0x%lx]\n", entry->pfn);
  468. }
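/*
 * For illustration: a caller that is about to let the CPU write to a page
 * which may still be mapped for DMA would do something like:
 *
 *	debug_dma_assert_idle(page);
 *	copy_highpage(dst, page);
 *
 * The copy_highpage() use here is hypothetical; any CPU access that races
 * with an outstanding mapping is the case this check targets.
 */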
  469. /*
  470. * Wrapper function for adding an entry to the hash.
  471. * This function takes care of locking itself.
  472. */
  473. static void add_dma_entry(struct dma_debug_entry *entry)
  474. {
  475. struct hash_bucket *bucket;
  476. unsigned long flags;
  477. int rc;
  478. bucket = get_hash_bucket(entry, &flags);
  479. hash_bucket_add(bucket, entry);
  480. put_hash_bucket(bucket, &flags);
  481. rc = active_pfn_insert(entry);
  482. if (rc == -ENOMEM) {
  483. pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n");
  484. global_disable = true;
  485. }
  486. /* TODO: report -EEXIST errors here as overlapping mappings are
  487. * not supported by the DMA API
  488. */
  489. }
  490. static struct dma_debug_entry *__dma_entry_alloc(void)
  491. {
  492. struct dma_debug_entry *entry;
  493. entry = list_entry(free_entries.next, struct dma_debug_entry, list);
  494. list_del(&entry->list);
  495. memset(entry, 0, sizeof(*entry));
  496. num_free_entries -= 1;
  497. if (num_free_entries < min_free_entries)
  498. min_free_entries = num_free_entries;
  499. return entry;
  500. }
  501. /* struct dma_entry allocator
  502. *
  503. * The next two functions implement the allocator for
  504. * struct dma_debug_entries.
  505. */
  506. static struct dma_debug_entry *dma_entry_alloc(void)
  507. {
  508. struct dma_debug_entry *entry;
  509. unsigned long flags;
  510. spin_lock_irqsave(&free_entries_lock, flags);
  511. if (list_empty(&free_entries)) {
  512. pr_err("DMA-API: debugging out of memory - disabling\n");
  513. global_disable = true;
  514. spin_unlock_irqrestore(&free_entries_lock, flags);
  515. return NULL;
  516. }
  517. entry = __dma_entry_alloc();
  518. spin_unlock_irqrestore(&free_entries_lock, flags);
  519. #ifdef CONFIG_STACKTRACE
  520. entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
  521. entry->stacktrace.entries = entry->st_entries;
  522. entry->stacktrace.skip = 2;
  523. save_stack_trace(&entry->stacktrace);
  524. #endif
  525. return entry;
  526. }
  527. static void dma_entry_free(struct dma_debug_entry *entry)
  528. {
  529. unsigned long flags;
  530. active_pfn_remove(entry);
  531. /*
  532. * add to beginning of the list - this way the entries are
  533. * more likely cache hot when they are reallocated.
  534. */
  535. spin_lock_irqsave(&free_entries_lock, flags);
  536. list_add(&entry->list, &free_entries);
  537. num_free_entries += 1;
  538. spin_unlock_irqrestore(&free_entries_lock, flags);
  539. }
  540. int dma_debug_resize_entries(u32 num_entries)
  541. {
  542. int i, delta, ret = 0;
  543. unsigned long flags;
  544. struct dma_debug_entry *entry;
  545. LIST_HEAD(tmp);
  546. spin_lock_irqsave(&free_entries_lock, flags);
  547. if (nr_total_entries < num_entries) {
  548. delta = num_entries - nr_total_entries;
  549. spin_unlock_irqrestore(&free_entries_lock, flags);
  550. for (i = 0; i < delta; i++) {
  551. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  552. if (!entry)
  553. break;
  554. list_add_tail(&entry->list, &tmp);
  555. }
  556. spin_lock_irqsave(&free_entries_lock, flags);
  557. list_splice(&tmp, &free_entries);
  558. nr_total_entries += i;
  559. num_free_entries += i;
  560. } else {
  561. delta = nr_total_entries - num_entries;
  562. for (i = 0; i < delta && !list_empty(&free_entries); i++) {
  563. entry = __dma_entry_alloc();
  564. kfree(entry);
  565. }
  566. nr_total_entries -= i;
  567. }
  568. if (nr_total_entries != num_entries)
  569. ret = 1;
  570. spin_unlock_irqrestore(&free_entries_lock, flags);
  571. return ret;
  572. }
  573. EXPORT_SYMBOL(dma_debug_resize_entries);
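/*
 * For illustration: an IOMMU driver that expects many concurrent mappings
 * might grow the pool once it knows its requirements, e.g.:
 *
 *	if (dma_debug_resize_entries(num_devices * ENTRIES_PER_DEVICE))
 *		pr_warn("could not resize dma-debug entry pool\n");
 *
 * num_devices and ENTRIES_PER_DEVICE are made-up names for this sketch;
 * the function returns non-zero if the pool could not reach the requested
 * size.
 */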
  574. /*
  575. * DMA-API debugging init code
  576. *
  577. * The init code does two things:
  578. * 1. Initialize core data structures
  579. * 2. Preallocate a given number of dma_debug_entry structs
  580. */
  581. static int prealloc_memory(u32 num_entries)
  582. {
  583. struct dma_debug_entry *entry, *next_entry;
  584. int i;
  585. for (i = 0; i < num_entries; ++i) {
  586. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  587. if (!entry)
  588. goto out_err;
  589. list_add_tail(&entry->list, &free_entries);
  590. }
  591. num_free_entries = num_entries;
  592. min_free_entries = num_entries;
  593. pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
  594. return 0;
  595. out_err:
  596. list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
  597. list_del(&entry->list);
  598. kfree(entry);
  599. }
  600. return -ENOMEM;
  601. }
  602. static ssize_t filter_read(struct file *file, char __user *user_buf,
  603. size_t count, loff_t *ppos)
  604. {
  605. char buf[NAME_MAX_LEN + 1];
  606. unsigned long flags;
  607. int len;
  608. if (!current_driver_name[0])
  609. return 0;
  610. /*
  611. * We can't copy to userspace directly because current_driver_name can
  612. * only be read under the driver_name_lock with irqs disabled. So
  613. * create a temporary copy first.
  614. */
  615. read_lock_irqsave(&driver_name_lock, flags);
  616. len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
  617. read_unlock_irqrestore(&driver_name_lock, flags);
  618. return simple_read_from_buffer(user_buf, count, ppos, buf, len);
  619. }
  620. static ssize_t filter_write(struct file *file, const char __user *userbuf,
  621. size_t count, loff_t *ppos)
  622. {
  623. char buf[NAME_MAX_LEN];
  624. unsigned long flags;
  625. size_t len;
  626. int i;
  627. /*
  628. * We can't copy from userspace directly. Access to
  629. * current_driver_name is protected with a write_lock with irqs
  630. * disabled. Since copy_from_user can fault and may sleep we
  631. * need to copy to temporary buffer first
  632. */
  633. len = min(count, (size_t)(NAME_MAX_LEN - 1));
  634. if (copy_from_user(buf, userbuf, len))
  635. return -EFAULT;
  636. buf[len] = 0;
  637. write_lock_irqsave(&driver_name_lock, flags);
  638. /*
  639. * Now handle the string we got from userspace very carefully.
  640. * The rules are:
  641. * - only use the first token we got
  642. * - token delimiter is everything looking like a space
  643. * character (' ', '\n', '\t' ...)
  644. *
  645. */
  646. if (!isalnum(buf[0])) {
  647. /*
  648. * If the first character userspace gave us is not
  649. * alphanumerical then assume the filter should be
  650. * switched off.
  651. */
  652. if (current_driver_name[0])
  653. pr_info("DMA-API: switching off dma-debug driver filter\n");
  654. current_driver_name[0] = 0;
  655. current_driver = NULL;
  656. goto out_unlock;
  657. }
  658. /*
  659. * Now parse out the first token and use it as the name for the
  660. * driver to filter for.
  661. */
  662. for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
  663. current_driver_name[i] = buf[i];
  664. if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
  665. break;
  666. }
  667. current_driver_name[i] = 0;
  668. current_driver = NULL;
  669. pr_info("DMA-API: enable driver filter for driver [%s]\n",
  670. current_driver_name);
  671. out_unlock:
  672. write_unlock_irqrestore(&driver_name_lock, flags);
  673. return count;
  674. }
  675. static const struct file_operations filter_fops = {
  676. .read = filter_read,
  677. .write = filter_write,
  678. .llseek = default_llseek,
  679. };
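/*
 * Usage example (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *
 * limits the error reports above to devices bound to the e1000e driver;
 * echoing an empty string (a newline is not alphanumeric) switches the
 * filter off again.
 */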
  680. static int dma_debug_fs_init(void)
  681. {
  682. dma_debug_dent = debugfs_create_dir("dma-api", NULL);
  683. if (!dma_debug_dent) {
  684. pr_err("DMA-API: can not create debugfs directory\n");
  685. return -ENOMEM;
  686. }
  687. global_disable_dent = debugfs_create_bool("disabled", 0444,
  688. dma_debug_dent,
  689. &global_disable);
  690. if (!global_disable_dent)
  691. goto out_err;
  692. error_count_dent = debugfs_create_u32("error_count", 0444,
  693. dma_debug_dent, &error_count);
  694. if (!error_count_dent)
  695. goto out_err;
  696. show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
  697. dma_debug_dent,
  698. &show_all_errors);
  699. if (!show_all_errors_dent)
  700. goto out_err;
  701. show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
  702. dma_debug_dent,
  703. &show_num_errors);
  704. if (!show_num_errors_dent)
  705. goto out_err;
  706. num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
  707. dma_debug_dent,
  708. &num_free_entries);
  709. if (!num_free_entries_dent)
  710. goto out_err;
  711. min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
  712. dma_debug_dent,
  713. &min_free_entries);
  714. if (!min_free_entries_dent)
  715. goto out_err;
  716. filter_dent = debugfs_create_file("driver_filter", 0644,
  717. dma_debug_dent, NULL, &filter_fops);
  718. if (!filter_dent)
  719. goto out_err;
  720. return 0;
  721. out_err:
  722. debugfs_remove_recursive(dma_debug_dent);
  723. return -ENOMEM;
  724. }
  725. static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
  726. {
  727. struct dma_debug_entry *entry;
  728. unsigned long flags;
  729. int count = 0, i;
  730. local_irq_save(flags);
  731. for (i = 0; i < HASH_SIZE; ++i) {
  732. spin_lock(&dma_entry_hash[i].lock);
  733. list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
  734. if (entry->dev == dev) {
  735. count += 1;
  736. *out_entry = entry;
  737. }
  738. }
  739. spin_unlock(&dma_entry_hash[i].lock);
  740. }
  741. local_irq_restore(flags);
  742. return count;
  743. }
  744. static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
  745. {
  746. struct device *dev = data;
  747. struct dma_debug_entry *uninitialized_var(entry);
  748. int count;
  749. if (global_disable)
  750. return 0;
  751. switch (action) {
  752. case BUS_NOTIFY_UNBOUND_DRIVER:
  753. count = device_dma_allocations(dev, &entry);
  754. if (count == 0)
  755. break;
  756. err_printk(dev, entry, "DMA-API: device driver has pending "
  757. "DMA allocations while released from device "
  758. "[count=%d]\n"
  759. "One of leaked entries details: "
  760. "[device address=0x%016llx] [size=%llu bytes] "
  761. "[mapped with %s] [mapped as %s]\n",
  762. count, entry->dev_addr, entry->size,
  763. dir2name[entry->direction], type2name[entry->type]);
  764. break;
  765. default:
  766. break;
  767. }
  768. return 0;
  769. }
  770. void dma_debug_add_bus(struct bus_type *bus)
  771. {
  772. struct notifier_block *nb;
  773. if (global_disable)
  774. return;
  775. nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
  776. if (nb == NULL) {
  777. pr_err("dma_debug_add_bus: out of memory\n");
  778. return;
  779. }
  780. nb->notifier_call = dma_debug_device_change;
  781. bus_register_notifier(bus, nb);
  782. }
  783. /*
  784. * Let the architectures decide how many entries should be preallocated.
  785. */
  786. void dma_debug_init(u32 num_entries)
  787. {
  788. int i;
  789. if (global_disable)
  790. return;
  791. for (i = 0; i < HASH_SIZE; ++i) {
  792. INIT_LIST_HEAD(&dma_entry_hash[i].list);
  793. spin_lock_init(&dma_entry_hash[i].lock);
  794. }
  795. if (dma_debug_fs_init() != 0) {
  796. pr_err("DMA-API: error creating debugfs entries - disabling\n");
  797. global_disable = true;
  798. return;
  799. }
  800. if (req_entries)
  801. num_entries = req_entries;
  802. if (prealloc_memory(num_entries) != 0) {
  803. pr_err("DMA-API: debugging out of memory error - disabled\n");
  804. global_disable = true;
  805. return;
  806. }
  807. nr_total_entries = num_free_entries;
  808. pr_info("DMA-API: debugging enabled by kernel config\n");
  809. }
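/*
 * For illustration: architecture code calls this once during boot, along
 * the lines of what x86 does in its PCI/DMA setup:
 *
 *	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 *	dma_debug_add_bus(&pci_bus_type);
 *
 * PREALLOC_DMA_DEBUG_ENTRIES is an architecture-chosen constant.
 */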
  810. static __init int dma_debug_cmdline(char *str)
  811. {
  812. if (!str)
  813. return -EINVAL;
  814. if (strncmp(str, "off", 3) == 0) {
  815. pr_info("DMA-API: debugging disabled on kernel command line\n");
  816. global_disable = true;
  817. }
  818. return 0;
  819. }
  820. static __init int dma_debug_entries_cmdline(char *str)
  821. {
  822. int res;
  823. if (!str)
  824. return -EINVAL;
  825. res = get_option(&str, &req_entries);
  826. if (!res)
  827. req_entries = 0;
  828. return 0;
  829. }
  830. __setup("dma_debug=", dma_debug_cmdline);
  831. __setup("dma_debug_entries=", dma_debug_entries_cmdline);
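/*
 * Example kernel command line usage (see Documentation/DMA-API.txt):
 * "dma_debug=off" disables the checks entirely, while e.g.
 * "dma_debug_entries=4096" overrides the architecture's preallocation
 * request handled in dma_debug_init() above.
 */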
  832. static void check_unmap(struct dma_debug_entry *ref)
  833. {
  834. struct dma_debug_entry *entry;
  835. struct hash_bucket *bucket;
  836. unsigned long flags;
  837. bucket = get_hash_bucket(ref, &flags);
  838. entry = bucket_find_exact(bucket, ref);
  839. if (!entry) {
  840. /* must drop lock before calling dma_mapping_error */
  841. put_hash_bucket(bucket, &flags);
  842. if (dma_mapping_error(ref->dev, ref->dev_addr)) {
  843. err_printk(ref->dev, NULL,
  844. "DMA-API: device driver tries to free an "
  845. "invalid DMA memory address\n");
  846. } else {
  847. err_printk(ref->dev, NULL,
  848. "DMA-API: device driver tries to free DMA "
  849. "memory it has not allocated [device "
  850. "address=0x%016llx] [size=%llu bytes]\n",
  851. ref->dev_addr, ref->size);
  852. }
  853. return;
  854. }
  855. if (ref->size != entry->size) {
  856. err_printk(ref->dev, entry, "DMA-API: device driver frees "
  857. "DMA memory with different size "
  858. "[device address=0x%016llx] [map size=%llu bytes] "
  859. "[unmap size=%llu bytes]\n",
  860. ref->dev_addr, entry->size, ref->size);
  861. }
  862. if (ref->type != entry->type) {
  863. err_printk(ref->dev, entry, "DMA-API: device driver frees "
  864. "DMA memory with wrong function "
  865. "[device address=0x%016llx] [size=%llu bytes] "
  866. "[mapped as %s] [unmapped as %s]\n",
  867. ref->dev_addr, ref->size,
  868. type2name[entry->type], type2name[ref->type]);
  869. } else if ((entry->type == dma_debug_coherent) &&
  870. (phys_addr(ref) != phys_addr(entry))) {
  871. err_printk(ref->dev, entry, "DMA-API: device driver frees "
  872. "DMA memory with different CPU address "
  873. "[device address=0x%016llx] [size=%llu bytes] "
  874. "[cpu alloc address=0x%016llx] "
  875. "[cpu free address=0x%016llx]",
  876. ref->dev_addr, ref->size,
  877. phys_addr(entry),
  878. phys_addr(ref));
  879. }
  880. if (ref->sg_call_ents && ref->type == dma_debug_sg &&
  881. ref->sg_call_ents != entry->sg_call_ents) {
  882. err_printk(ref->dev, entry, "DMA-API: device driver frees "
  883. "DMA sg list with different entry count "
  884. "[map count=%d] [unmap count=%d]\n",
  885. entry->sg_call_ents, ref->sg_call_ents);
  886. }
  887. /*
  888. * This may be no bug in reality - but most implementations of the
  889. * DMA API don't handle this properly, so check for it here
  890. */
  891. if (ref->direction != entry->direction) {
  892. err_printk(ref->dev, entry, "DMA-API: device driver frees "
  893. "DMA memory with different direction "
  894. "[device address=0x%016llx] [size=%llu bytes] "
  895. "[mapped with %s] [unmapped with %s]\n",
  896. ref->dev_addr, ref->size,
  897. dir2name[entry->direction],
  898. dir2name[ref->direction]);
  899. }
  900. if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
  901. err_printk(ref->dev, entry,
  902. "DMA-API: device driver failed to check map error "
  903. "[device address=0x%016llx] [size=%llu bytes] "
  904. "[mapped as %s]\n",
  905. ref->dev_addr, ref->size,
  906. type2name[entry->type]);
  907. }
  908. hash_bucket_del(entry);
  909. dma_entry_free(entry);
  910. put_hash_bucket(bucket, &flags);
  911. }
  912. static void check_for_stack(struct device *dev, void *addr)
  913. {
  914. if (object_is_on_stack(addr))
  915. err_printk(dev, NULL, "DMA-API: device driver maps memory from "
  916. "stack [addr=%p]\n", addr);
  917. }
  918. static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
  919. {
  920. unsigned long a1 = (unsigned long)addr;
  921. unsigned long b1 = a1 + len;
  922. unsigned long a2 = (unsigned long)start;
  923. unsigned long b2 = (unsigned long)end;
  924. return !(b1 <= a2 || a1 >= b2);
  925. }
  926. static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
  927. {
  928. if (overlap(addr, len, _text, _etext) ||
  929. overlap(addr, len, __start_rodata, __end_rodata))
  930. err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
  931. }
  932. static void check_sync(struct device *dev,
  933. struct dma_debug_entry *ref,
  934. bool to_cpu)
  935. {
  936. struct dma_debug_entry *entry;
  937. struct hash_bucket *bucket;
  938. unsigned long flags;
  939. bucket = get_hash_bucket(ref, &flags);
  940. entry = bucket_find_contain(&bucket, ref, &flags);
  941. if (!entry) {
  942. err_printk(dev, NULL, "DMA-API: device driver tries "
  943. "to sync DMA memory it has not allocated "
  944. "[device address=0x%016llx] [size=%llu bytes]\n",
  945. (unsigned long long)ref->dev_addr, ref->size);
  946. goto out;
  947. }
  948. if (ref->size > entry->size) {
  949. err_printk(dev, entry, "DMA-API: device driver syncs"
  950. " DMA memory outside allocated range "
  951. "[device address=0x%016llx] "
  952. "[allocation size=%llu bytes] "
  953. "[sync offset+size=%llu]\n",
  954. entry->dev_addr, entry->size,
  955. ref->size);
  956. }
  957. if (entry->direction == DMA_BIDIRECTIONAL)
  958. goto out;
  959. if (ref->direction != entry->direction) {
  960. err_printk(dev, entry, "DMA-API: device driver syncs "
  961. "DMA memory with different direction "
  962. "[device address=0x%016llx] [size=%llu bytes] "
  963. "[mapped with %s] [synced with %s]\n",
  964. (unsigned long long)ref->dev_addr, entry->size,
  965. dir2name[entry->direction],
  966. dir2name[ref->direction]);
  967. }
  968. if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
  969. !(ref->direction == DMA_TO_DEVICE))
  970. err_printk(dev, entry, "DMA-API: device driver syncs "
  971. "device read-only DMA memory for cpu "
  972. "[device address=0x%016llx] [size=%llu bytes] "
  973. "[mapped with %s] [synced with %s]\n",
  974. (unsigned long long)ref->dev_addr, entry->size,
  975. dir2name[entry->direction],
  976. dir2name[ref->direction]);
  977. if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
  978. !(ref->direction == DMA_FROM_DEVICE))
  979. err_printk(dev, entry, "DMA-API: device driver syncs "
  980. "device write-only DMA memory to device "
  981. "[device address=0x%016llx] [size=%llu bytes] "
  982. "[mapped with %s] [synced with %s]\n",
  983. (unsigned long long)ref->dev_addr, entry->size,
  984. dir2name[entry->direction],
  985. dir2name[ref->direction]);
  986. out:
  987. put_hash_bucket(bucket, &flags);
  988. }
  989. void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
  990. size_t size, int direction, dma_addr_t dma_addr,
  991. bool map_single)
  992. {
  993. struct dma_debug_entry *entry;
  994. if (unlikely(global_disable))
  995. return;
  996. if (dma_mapping_error(dev, dma_addr))
  997. return;
  998. entry = dma_entry_alloc();
  999. if (!entry)
  1000. return;
  1001. entry->dev = dev;
  1002. entry->type = dma_debug_page;
  1003. entry->pfn = page_to_pfn(page);
  1004. entry->offset = offset;
  1005. entry->dev_addr = dma_addr;
  1006. entry->size = size;
  1007. entry->direction = direction;
  1008. entry->map_err_type = MAP_ERR_NOT_CHECKED;
  1009. if (map_single)
  1010. entry->type = dma_debug_single;
  1011. if (!PageHighMem(page)) {
  1012. void *addr = page_address(page) + offset;
  1013. check_for_stack(dev, addr);
  1014. check_for_illegal_area(dev, addr, size);
  1015. }
  1016. add_dma_entry(entry);
  1017. }
  1018. EXPORT_SYMBOL(debug_dma_map_page);
  1019. void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  1020. {
  1021. struct dma_debug_entry ref;
  1022. struct dma_debug_entry *entry;
  1023. struct hash_bucket *bucket;
  1024. unsigned long flags;
  1025. if (unlikely(global_disable))
  1026. return;
  1027. ref.dev = dev;
  1028. ref.dev_addr = dma_addr;
  1029. bucket = get_hash_bucket(&ref, &flags);
  1030. list_for_each_entry(entry, &bucket->list, list) {
  1031. if (!exact_match(&ref, entry))
  1032. continue;
  1033. /*
  1034. * The same physical address can be mapped multiple
  1035. * times. Without a hardware IOMMU this results in the
  1036. * same device addresses being put into the dma-debug
  1037. * hash multiple times too. This can result in false
  1038. * positives being reported. Therefore we implement a
  1039. * best-fit algorithm here which updates the first entry
  1040. * from the hash which fits the reference value and is
  1041. * not currently listed as being checked.
  1042. */
  1043. if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
  1044. entry->map_err_type = MAP_ERR_CHECKED;
  1045. break;
  1046. }
  1047. }
  1048. put_hash_bucket(bucket, &flags);
  1049. }
  1050. EXPORT_SYMBOL(debug_dma_mapping_error);
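/*
 * For illustration, the driver-side pattern this tracks looks like:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 * Skipping the dma_mapping_error() check leaves the entry in
 * MAP_ERR_NOT_CHECKED state, which check_unmap() warns about at unmap
 * time.
 */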
  1051. void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
  1052. size_t size, int direction, bool map_single)
  1053. {
  1054. struct dma_debug_entry ref = {
  1055. .type = dma_debug_page,
  1056. .dev = dev,
  1057. .dev_addr = addr,
  1058. .size = size,
  1059. .direction = direction,
  1060. };
  1061. if (unlikely(global_disable))
  1062. return;
  1063. if (map_single)
  1064. ref.type = dma_debug_single;
  1065. check_unmap(&ref);
  1066. }
  1067. EXPORT_SYMBOL(debug_dma_unmap_page);
  1068. void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
  1069. int nents, int mapped_ents, int direction)
  1070. {
  1071. struct dma_debug_entry *entry;
  1072. struct scatterlist *s;
  1073. int i;
  1074. if (unlikely(global_disable))
  1075. return;
  1076. for_each_sg(sg, s, mapped_ents, i) {
  1077. entry = dma_entry_alloc();
  1078. if (!entry)
  1079. return;
  1080. entry->type = dma_debug_sg;
  1081. entry->dev = dev;
  1082. entry->pfn = page_to_pfn(sg_page(s));
  1083. entry->offset = s->offset;
  1084. entry->size = sg_dma_len(s);
  1085. entry->dev_addr = sg_dma_address(s);
  1086. entry->direction = direction;
  1087. entry->sg_call_ents = nents;
  1088. entry->sg_mapped_ents = mapped_ents;
  1089. if (!PageHighMem(sg_page(s))) {
  1090. check_for_stack(dev, sg_virt(s));
  1091. check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
  1092. }
  1093. add_dma_entry(entry);
  1094. }
  1095. }
  1096. EXPORT_SYMBOL(debug_dma_map_sg);
  1097. static int get_nr_mapped_entries(struct device *dev,
  1098. struct dma_debug_entry *ref)
  1099. {
  1100. struct dma_debug_entry *entry;
  1101. struct hash_bucket *bucket;
  1102. unsigned long flags;
  1103. int mapped_ents;
  1104. bucket = get_hash_bucket(ref, &flags);
  1105. entry = bucket_find_exact(bucket, ref);
  1106. mapped_ents = 0;
  1107. if (entry)
  1108. mapped_ents = entry->sg_mapped_ents;
  1109. put_hash_bucket(bucket, &flags);
  1110. return mapped_ents;
  1111. }
  1112. void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
  1113. int nelems, int dir)
  1114. {
  1115. struct scatterlist *s;
  1116. int mapped_ents = 0, i;
  1117. if (unlikely(global_disable))
  1118. return;
  1119. for_each_sg(sglist, s, nelems, i) {
  1120. struct dma_debug_entry ref = {
  1121. .type = dma_debug_sg,
  1122. .dev = dev,
  1123. .pfn = page_to_pfn(sg_page(s)),
  1124. .offset = s->offset,
  1125. .dev_addr = sg_dma_address(s),
  1126. .size = sg_dma_len(s),
  1127. .direction = dir,
  1128. .sg_call_ents = nelems,
  1129. };
  1130. if (mapped_ents && i >= mapped_ents)
  1131. break;
  1132. if (!i)
  1133. mapped_ents = get_nr_mapped_entries(dev, &ref);
  1134. check_unmap(&ref);
  1135. }
  1136. }
  1137. EXPORT_SYMBOL(debug_dma_unmap_sg);
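/*
 * For illustration, the DMA-API rule that sg_call_ents/sg_mapped_ents
 * help enforce: dma_unmap_sg() must be passed the same 'nents' that was
 * given to dma_map_sg(), not the (possibly smaller) count it returned:
 *
 *	nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);
 */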
  1138. void debug_dma_alloc_coherent(struct device *dev, size_t size,
  1139. dma_addr_t dma_addr, void *virt)
  1140. {
  1141. struct dma_debug_entry *entry;
  1142. if (unlikely(global_disable))
  1143. return;
  1144. if (unlikely(virt == NULL))
  1145. return;
  1146. entry = dma_entry_alloc();
  1147. if (!entry)
  1148. return;
  1149. entry->type = dma_debug_coherent;
  1150. entry->dev = dev;
  1151. entry->pfn = page_to_pfn(virt_to_page(virt));
  1152. entry->offset = (size_t) virt & ~PAGE_MASK;
  1153. entry->size = size;
  1154. entry->dev_addr = dma_addr;
  1155. entry->direction = DMA_BIDIRECTIONAL;
  1156. add_dma_entry(entry);
  1157. }
  1158. EXPORT_SYMBOL(debug_dma_alloc_coherent);
  1159. void debug_dma_free_coherent(struct device *dev, size_t size,
  1160. void *virt, dma_addr_t addr)
  1161. {
  1162. struct dma_debug_entry ref = {
  1163. .type = dma_debug_coherent,
  1164. .dev = dev,
  1165. .pfn = page_to_pfn(virt_to_page(virt)),
  1166. .offset = (size_t) virt & ~PAGE_MASK,
  1167. .dev_addr = addr,
  1168. .size = size,
  1169. .direction = DMA_BIDIRECTIONAL,
  1170. };
  1171. if (unlikely(global_disable))
  1172. return;
  1173. check_unmap(&ref);
  1174. }
  1175. EXPORT_SYMBOL(debug_dma_free_coherent);
  1176. void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
  1177. size_t size, int direction)
  1178. {
  1179. struct dma_debug_entry ref;
  1180. if (unlikely(global_disable))
  1181. return;
  1182. ref.type = dma_debug_single;
  1183. ref.dev = dev;
  1184. ref.dev_addr = dma_handle;
  1185. ref.size = size;
  1186. ref.direction = direction;
  1187. ref.sg_call_ents = 0;
  1188. check_sync(dev, &ref, true);
  1189. }
  1190. EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
  1191. void debug_dma_sync_single_for_device(struct device *dev,
  1192. dma_addr_t dma_handle, size_t size,
  1193. int direction)
  1194. {
  1195. struct dma_debug_entry ref;
  1196. if (unlikely(global_disable))
  1197. return;
  1198. ref.type = dma_debug_single;
  1199. ref.dev = dev;
  1200. ref.dev_addr = dma_handle;
  1201. ref.size = size;
  1202. ref.direction = direction;
  1203. ref.sg_call_ents = 0;
  1204. check_sync(dev, &ref, false);
  1205. }
  1206. EXPORT_SYMBOL(debug_dma_sync_single_for_device);
  1207. void debug_dma_sync_single_range_for_cpu(struct device *dev,
  1208. dma_addr_t dma_handle,
  1209. unsigned long offset, size_t size,
  1210. int direction)
  1211. {
  1212. struct dma_debug_entry ref;
  1213. if (unlikely(global_disable))
  1214. return;
  1215. ref.type = dma_debug_single;
  1216. ref.dev = dev;
  1217. ref.dev_addr = dma_handle;
  1218. ref.size = offset + size;
  1219. ref.direction = direction;
  1220. ref.sg_call_ents = 0;
  1221. check_sync(dev, &ref, true);
  1222. }
  1223. EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
  1224. void debug_dma_sync_single_range_for_device(struct device *dev,
  1225. dma_addr_t dma_handle,
  1226. unsigned long offset,
  1227. size_t size, int direction)
  1228. {
  1229. struct dma_debug_entry ref;
  1230. if (unlikely(global_disable))
  1231. return;
  1232. ref.type = dma_debug_single;
  1233. ref.dev = dev;
  1234. ref.dev_addr = dma_handle;
  1235. ref.size = offset + size;
  1236. ref.direction = direction;
  1237. ref.sg_call_ents = 0;
  1238. check_sync(dev, &ref, false);
  1239. }
  1240. EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
  1241. void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
  1242. int nelems, int direction)
  1243. {
  1244. struct scatterlist *s;
  1245. int mapped_ents = 0, i;
  1246. if (unlikely(global_disable))
  1247. return;
  1248. for_each_sg(sg, s, nelems, i) {
  1249. struct dma_debug_entry ref = {
  1250. .type = dma_debug_sg,
  1251. .dev = dev,
  1252. .pfn = page_to_pfn(sg_page(s)),
  1253. .offset = s->offset,
  1254. .dev_addr = sg_dma_address(s),
  1255. .size = sg_dma_len(s),
  1256. .direction = direction,
  1257. .sg_call_ents = nelems,
  1258. };
  1259. if (!i)
  1260. mapped_ents = get_nr_mapped_entries(dev, &ref);
  1261. if (i >= mapped_ents)
  1262. break;
  1263. check_sync(dev, &ref, true);
  1264. }
  1265. }
  1266. EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
  1267. void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  1268. int nelems, int direction)
  1269. {
  1270. struct scatterlist *s;
  1271. int mapped_ents = 0, i;
  1272. if (unlikely(global_disable))
  1273. return;
  1274. for_each_sg(sg, s, nelems, i) {
  1275. struct dma_debug_entry ref = {
  1276. .type = dma_debug_sg,
  1277. .dev = dev,
  1278. .pfn = page_to_pfn(sg_page(s)),
  1279. .offset = s->offset,
  1280. .dev_addr = sg_dma_address(s),
  1281. .size = sg_dma_len(s),
  1282. .direction = direction,
  1283. .sg_call_ents = nelems,
  1284. };
  1285. if (!i)
  1286. mapped_ents = get_nr_mapped_entries(dev, &ref);
  1287. if (i >= mapped_ents)
  1288. break;
  1289. check_sync(dev, &ref, false);
  1290. }
  1291. }
  1292. EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
  1293. static int __init dma_debug_driver_setup(char *str)
  1294. {
  1295. int i;
  1296. for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
  1297. current_driver_name[i] = *str;
  1298. if (*str == 0)
  1299. break;
  1300. }
  1301. if (current_driver_name[0])
  1302. pr_info("DMA-API: enable driver filter for driver [%s]\n",
  1303. current_driver_name);
  1304. return 1;
  1305. }
  1306. __setup("dma_debug_driver=", dma_debug_driver_setup);
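/*
 * Example: booting with "dma_debug_driver=e1000e" arms the driver filter
 * before debugfs is available; the filter can later be changed through
 * /sys/kernel/debug/dma-api/driver_filter (see filter_write() above).
 */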