regmap-debugfs.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643
/*
 * Register map access API - debugfs
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  12. #include <linux/slab.h>
  13. #include <linux/mutex.h>
  14. #include <linux/debugfs.h>
  15. #include <linux/uaccess.h>
  16. #include <linux/device.h>
  17. #include <linux/list.h>
  18. #include "internal.h"
/*
 * Bookkeeping for maps registered before the debugfs root exists: they
 * are parked on regmap_debugfs_early_list and picked up later by
 * regmap_debugfs_initcall().
 */
struct regmap_debugfs_node {
	struct regmap *map;	/* map awaiting debugfs registration */
	const char *name;	/* optional name suffix passed to init */
	struct list_head link;	/* entry in regmap_debugfs_early_list */
};

/* Top-level "regmap" directory in debugfs; NULL until the initcall runs */
static struct dentry *regmap_debugfs_root;

/* Early-registration queue and the mutex protecting it */
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
  27. /* Calculate the length of a fixed format */
  28. static size_t regmap_calc_reg_len(int max_val)
  29. {
  30. return snprintf(NULL, 0, "%x", max_val);
  31. }
  32. static ssize_t regmap_name_read_file(struct file *file,
  33. char __user *user_buf, size_t count,
  34. loff_t *ppos)
  35. {
  36. struct regmap *map = file->private_data;
  37. int ret;
  38. char *buf;
  39. buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  40. if (!buf)
  41. return -ENOMEM;
  42. ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
  43. if (ret < 0) {
  44. kfree(buf);
  45. return ret;
  46. }
  47. ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
  48. kfree(buf);
  49. return ret;
  50. }
/* File operations for the read-only "name" debugfs entry */
static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};
  56. static void regmap_debugfs_free_dump_cache(struct regmap *map)
  57. {
  58. struct regmap_debugfs_off_cache *c;
  59. while (!list_empty(&map->debugfs_off_cache)) {
  60. c = list_first_entry(&map->debugfs_off_cache,
  61. struct regmap_debugfs_off_cache,
  62. list);
  63. list_del(&c->list);
  64. kfree(c);
  65. }
  66. }
/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 *
 * @map:  regmap being dumped
 * @base: first register of the range being dumped; non-zero means a
 *        named subrange file, for which the cache is bypassed
 * @from: file position the user asked to read from
 * @pos:  out: file position of the start of the register line
 *        containing @from
 *
 * Returns the register number the dump should start from.  The first
 * call builds a cache of contiguous printable-register blocks so later
 * seeks do not need a linear scan of the whole map.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_readable(map, i) ||
			    regmap_precious(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}
				continue;
			}

			/* No cache entry? Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					/*
					 * Out of memory: drop the partial
					 * cache; the next call will retry
					 * with a fresh linear scan.
					 */
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			/* Round down to the start of a register line */
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		/* Track the last block in case @from lies beyond it */
		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}
  147. static inline void regmap_calc_tot_len(struct regmap *map,
  148. void *buf, size_t count)
  149. {
  150. /* Calculate the length of a fixed format */
  151. if (!map->debugfs_tot_len) {
  152. map->debugfs_reg_len = regmap_calc_reg_len(map->max_register),
  153. map->debugfs_val_len = 2 * map->format.val_bytes;
  154. map->debugfs_tot_len = map->debugfs_reg_len +
  155. map->debugfs_val_len + 3; /* : \n */
  156. }
  157. }
  158. static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
  159. unsigned int to, char __user *user_buf,
  160. size_t count, loff_t *ppos)
  161. {
  162. size_t buf_pos = 0;
  163. loff_t p = *ppos;
  164. ssize_t ret;
  165. int i;
  166. char *buf;
  167. unsigned int val, start_reg;
  168. if (*ppos < 0 || !count)
  169. return -EINVAL;
  170. buf = kmalloc(count, GFP_KERNEL);
  171. if (!buf)
  172. return -ENOMEM;
  173. regmap_calc_tot_len(map, buf, count);
  174. /* Work out which register we're starting at */
  175. start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
  176. for (i = start_reg; i <= to; i += map->reg_stride) {
  177. if (!regmap_readable(map, i))
  178. continue;
  179. if (regmap_precious(map, i))
  180. continue;
  181. /* If we're in the region the user is trying to read */
  182. if (p >= *ppos) {
  183. /* ...but not beyond it */
  184. if (buf_pos + map->debugfs_tot_len > count)
  185. break;
  186. /* Format the register */
  187. snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
  188. map->debugfs_reg_len, i - from);
  189. buf_pos += map->debugfs_reg_len + 2;
  190. /* Format the value, write all X if we can't read */
  191. ret = regmap_read(map, i, &val);
  192. if (ret == 0)
  193. snprintf(buf + buf_pos, count - buf_pos,
  194. "%.*x", map->debugfs_val_len, val);
  195. else
  196. memset(buf + buf_pos, 'X',
  197. map->debugfs_val_len);
  198. buf_pos += 2 * map->format.val_bytes;
  199. buf[buf_pos++] = '\n';
  200. }
  201. p += map->debugfs_tot_len;
  202. }
  203. ret = buf_pos;
  204. if (copy_to_user(user_buf, buf, buf_pos)) {
  205. ret = -EFAULT;
  206. goto out;
  207. }
  208. *ppos += buf_pos;
  209. out:
  210. kfree(buf);
  211. return ret;
  212. }
  213. static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
  214. size_t count, loff_t *ppos)
  215. {
  216. struct regmap *map = file->private_data;
  217. return regmap_read_debugfs(map, 0, map->max_register, user_buf,
  218. count, ppos);
  219. }
#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous especially when we have clients such as
 * PMICs, therefore don't provide any real compile time configuration option
 * for this feature, people who want to use this will need to modify
 * the source code directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	/* Expected input: "<reg> <value>", both hexadecimal */
	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
/* Write support compiled out; the "registers" file is read-only */
#define regmap_map_write_file NULL
#endif
/*
 * "registers" file operations; .write is NULL unless
 * REGMAP_ALLOW_WRITE_DEBUGFS is defined at build time.
 */
static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};
  265. static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
  266. size_t count, loff_t *ppos)
  267. {
  268. struct regmap_range_node *range = file->private_data;
  269. struct regmap *map = range->map;
  270. return regmap_read_debugfs(map, range->range_min, range->range_max,
  271. user_buf, count, ppos);
  272. }
/* File operations for per-range register dump files */
static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};
  278. static ssize_t regmap_reg_ranges_read_file(struct file *file,
  279. char __user *user_buf, size_t count,
  280. loff_t *ppos)
  281. {
  282. struct regmap *map = file->private_data;
  283. struct regmap_debugfs_off_cache *c;
  284. loff_t p = 0;
  285. size_t buf_pos = 0;
  286. char *buf;
  287. char *entry;
  288. int ret;
  289. unsigned entry_len;
  290. if (*ppos < 0 || !count)
  291. return -EINVAL;
  292. buf = kmalloc(count, GFP_KERNEL);
  293. if (!buf)
  294. return -ENOMEM;
  295. entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
  296. if (!entry) {
  297. kfree(buf);
  298. return -ENOMEM;
  299. }
  300. /* While we are at it, build the register dump cache
  301. * now so the read() operation on the `registers' file
  302. * can benefit from using the cache. We do not care
  303. * about the file position information that is contained
  304. * in the cache, just about the actual register blocks */
  305. regmap_calc_tot_len(map, buf, count);
  306. regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
  307. /* Reset file pointer as the fixed-format of the `registers'
  308. * file is not compatible with the `range' file */
  309. p = 0;
  310. mutex_lock(&map->cache_lock);
  311. list_for_each_entry(c, &map->debugfs_off_cache, list) {
  312. entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
  313. c->base_reg, c->max_reg);
  314. if (p >= *ppos) {
  315. if (buf_pos + entry_len > count)
  316. break;
  317. memcpy(buf + buf_pos, entry, entry_len);
  318. buf_pos += entry_len;
  319. }
  320. p += entry_len;
  321. }
  322. mutex_unlock(&map->cache_lock);
  323. kfree(entry);
  324. ret = buf_pos;
  325. if (copy_to_user(user_buf, buf, buf_pos)) {
  326. ret = -EFAULT;
  327. goto out_buf;
  328. }
  329. *ppos += buf_pos;
  330. out_buf:
  331. kfree(buf);
  332. return ret;
  333. }
/* File operations for the read-only "range" listing file */
static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};
  339. static int regmap_access_show(struct seq_file *s, void *ignored)
  340. {
  341. struct regmap *map = s->private;
  342. int i, reg_len;
  343. reg_len = regmap_calc_reg_len(map->max_register);
  344. for (i = 0; i <= map->max_register; i += map->reg_stride) {
  345. /* Ignore registers which are neither readable nor writable */
  346. if (!regmap_readable(map, i) && !regmap_writeable(map, i))
  347. continue;
  348. /* Format the register */
  349. seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
  350. regmap_readable(map, i) ? 'y' : 'n',
  351. regmap_writeable(map, i) ? 'y' : 'n',
  352. regmap_volatile(map, i) ? 'y' : 'n',
  353. regmap_precious(map, i) ? 'y' : 'n');
  354. }
  355. return 0;
  356. }
/* seq_file open wrapper feeding the regmap through to regmap_access_show() */
static int access_open(struct inode *inode, struct file *file)
{
	return single_open(file, regmap_access_show, inode->i_private);
}

/* File operations for the read-only "access" file */
static const struct file_operations regmap_access_fops = {
	.open = access_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  367. static ssize_t regmap_cache_only_write_file(struct file *file,
  368. const char __user *user_buf,
  369. size_t count, loff_t *ppos)
  370. {
  371. struct regmap *map = container_of(file->private_data,
  372. struct regmap, cache_only);
  373. ssize_t result;
  374. bool was_enabled, require_sync = false;
  375. int err;
  376. map->lock(map->lock_arg);
  377. was_enabled = map->cache_only;
  378. result = debugfs_write_file_bool(file, user_buf, count, ppos);
  379. if (result < 0) {
  380. map->unlock(map->lock_arg);
  381. return result;
  382. }
  383. if (map->cache_only && !was_enabled) {
  384. dev_warn(map->dev, "debugfs cache_only=Y forced\n");
  385. add_taint(TAINT_USER, LOCKDEP_STILL_OK);
  386. } else if (!map->cache_only && was_enabled) {
  387. dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
  388. require_sync = true;
  389. }
  390. map->unlock(map->lock_arg);
  391. if (require_sync) {
  392. err = regcache_sync(map);
  393. if (err)
  394. dev_err(map->dev, "Failed to sync cache %d\n", err);
  395. }
  396. return result;
  397. }
/* "cache_only": bool read helper, writes via regmap_cache_only_write_file() */
static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};
  403. static ssize_t regmap_cache_bypass_write_file(struct file *file,
  404. const char __user *user_buf,
  405. size_t count, loff_t *ppos)
  406. {
  407. struct regmap *map = container_of(file->private_data,
  408. struct regmap, cache_bypass);
  409. ssize_t result;
  410. bool was_enabled;
  411. map->lock(map->lock_arg);
  412. was_enabled = map->cache_bypass;
  413. result = debugfs_write_file_bool(file, user_buf, count, ppos);
  414. if (result < 0)
  415. goto out;
  416. if (map->cache_bypass && !was_enabled) {
  417. dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
  418. add_taint(TAINT_USER, LOCKDEP_STILL_OK);
  419. } else if (!map->cache_bypass && was_enabled) {
  420. dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
  421. }
  422. out:
  423. map->unlock(map->lock_arg);
  424. return result;
  425. }
/* "cache_bypass": bool read helper, writes via regmap_cache_bypass_write_file() */
static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};
  431. void regmap_debugfs_init(struct regmap *map, const char *name)
  432. {
  433. struct rb_node *next;
  434. struct regmap_range_node *range_node;
  435. const char *devname = "dummy";
  436. /* If we don't have the debugfs root yet, postpone init */
  437. if (!regmap_debugfs_root) {
  438. struct regmap_debugfs_node *node;
  439. node = kzalloc(sizeof(*node), GFP_KERNEL);
  440. if (!node)
  441. return;
  442. node->map = map;
  443. node->name = name;
  444. mutex_lock(&regmap_debugfs_early_lock);
  445. list_add(&node->link, &regmap_debugfs_early_list);
  446. mutex_unlock(&regmap_debugfs_early_lock);
  447. return;
  448. }
  449. INIT_LIST_HEAD(&map->debugfs_off_cache);
  450. mutex_init(&map->cache_lock);
  451. if (map->dev)
  452. devname = dev_name(map->dev);
  453. if (name) {
  454. map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
  455. devname, name);
  456. name = map->debugfs_name;
  457. } else {
  458. name = devname;
  459. }
  460. map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
  461. if (!map->debugfs) {
  462. dev_warn(map->dev, "Failed to create debugfs directory\n");
  463. return;
  464. }
  465. debugfs_create_file("name", 0400, map->debugfs,
  466. map, &regmap_name_fops);
  467. debugfs_create_file("range", 0400, map->debugfs,
  468. map, &regmap_reg_ranges_fops);
  469. if (map->max_register || regmap_readable(map, 0)) {
  470. umode_t registers_mode;
  471. #if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
  472. registers_mode = 0600;
  473. #else
  474. registers_mode = 0400;
  475. #endif
  476. debugfs_create_file("registers", registers_mode, map->debugfs,
  477. map, &regmap_map_fops);
  478. debugfs_create_file("access", 0400, map->debugfs,
  479. map, &regmap_access_fops);
  480. }
  481. if (map->cache_type) {
  482. debugfs_create_file("cache_only", 0600, map->debugfs,
  483. &map->cache_only, &regmap_cache_only_fops);
  484. debugfs_create_bool("cache_dirty", 0400, map->debugfs,
  485. &map->cache_dirty);
  486. debugfs_create_file("cache_bypass", 0600, map->debugfs,
  487. &map->cache_bypass,
  488. &regmap_cache_bypass_fops);
  489. }
  490. next = rb_first(&map->range_tree);
  491. while (next) {
  492. range_node = rb_entry(next, struct regmap_range_node, node);
  493. if (range_node->name)
  494. debugfs_create_file(range_node->name, 0400,
  495. map->debugfs, range_node,
  496. &regmap_range_fops);
  497. next = rb_next(&range_node->node);
  498. }
  499. if (map->cache_ops && map->cache_ops->debugfs_init)
  500. map->cache_ops->debugfs_init(map);
  501. }
  502. void regmap_debugfs_exit(struct regmap *map)
  503. {
  504. if (map->debugfs) {
  505. debugfs_remove_recursive(map->debugfs);
  506. mutex_lock(&map->cache_lock);
  507. regmap_debugfs_free_dump_cache(map);
  508. mutex_unlock(&map->cache_lock);
  509. kfree(map->debugfs_name);
  510. } else {
  511. struct regmap_debugfs_node *node, *tmp;
  512. mutex_lock(&regmap_debugfs_early_lock);
  513. list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
  514. link) {
  515. if (node->map == map) {
  516. list_del(&node->link);
  517. kfree(node);
  518. }
  519. }
  520. mutex_unlock(&regmap_debugfs_early_lock);
  521. }
  522. }
  523. void regmap_debugfs_initcall(void)
  524. {
  525. struct regmap_debugfs_node *node, *tmp;
  526. regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
  527. if (!regmap_debugfs_root) {
  528. pr_warn("regmap: Failed to create debugfs root\n");
  529. return;
  530. }
  531. mutex_lock(&regmap_debugfs_early_lock);
  532. list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
  533. regmap_debugfs_init(node->map, node->name);
  534. list_del(&node->link);
  535. kfree(node);
  536. }
  537. mutex_unlock(&regmap_debugfs_early_lock);
  538. }