/*
 * Register map access API - debugfs
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

struct regmap_debugfs_node {
	struct regmap *map;
	const char *name;
	struct list_head link;
};

static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
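
/*
 * Maps registered before the debugfs "regmap" root directory exists are
 * parked on regmap_debugfs_early_list and picked up later by
 * regmap_debugfs_initcall(); see regmap_debugfs_init() below.
 */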

/* Calculate the length of a fixed format  */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
{
	return snprintf(NULL, 0, "%x", max_val);
}

static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
	if (ret < 0) {
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};
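
/*
 * Illustrative usage (assuming the usual debugfs mount point; the per-map
 * directory is named after the device, optionally with a "-<name>" suffix
 * for named maps, see regmap_debugfs_init()).  "wm8994" is just a
 * hypothetical driver name:
 *
 *   $ cat /sys/kernel/debug/regmap/<dev>/name
 *   wm8994
 */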

static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
	struct regmap_debugfs_off_cache *c;

	while (!list_empty(&map->debugfs_off_cache)) {
		c = list_first_entry(&map->debugfs_off_cache,
				     struct regmap_debugfs_off_cache,
				     list);
		list_del(&c->list);
		kfree(c);
	}
}

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_readable(map, i) ||
			    regmap_precious(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry? Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}
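
/*
 * Each regmap_debugfs_off_cache entry describes one contiguous block of
 * printable (readable, non-precious) registers: [base_reg, max_reg] in
 * register space and [min, max] as byte offsets into the fixed-width
 * "registers" file.  Translating a file offset back to a register number
 * is then a short list walk plus a division by debugfs_tot_len rather
 * than a linear scan over the whole register map.
 */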

static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format  */
	if (!map->debugfs_tot_len) {
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
							   buf, count);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;      /* : \n */
	}
}
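
/*
 * Worked example with illustrative values: for a map with max_register
 * 0x3ff and 16-bit values, regmap_calc_reg_len() returns 3 ("3ff"),
 * debugfs_val_len is 2 * 2 = 4, and debugfs_tot_len is 3 + 4 + 3 = 10
 * bytes per line -- "3ff: beef\n" is exactly 10 characters.
 */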

static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i <= to; i += map->reg_stride) {
		if (!regmap_readable(map, i))
			continue;

		if (regmap_precious(map, i))
			continue;

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;
out:
	kfree(buf);
	return ret;
}

static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}
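
/*
 * Illustrative "registers" output: one fixed-width "register: value" line
 * (both in hex) per readable, non-precious register; values that cannot
 * be read are shown as 'X'.  Register and value widths depend on
 * max_register and format.val_bytes, see regmap_calc_tot_len():
 *
 *   $ cat /sys/kernel/debug/regmap/<dev>/registers
 *   00: 1a2b
 *   01: 0000
 *   02: XXXX
 */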

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous especially when we have clients such as
 * PMICs, therefore don't provide any real compile time configuration option
 * for this feature, people who want to use this will need to modify
 * the source code directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif
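
/*
 * Illustrative write usage, only available after REGMAP_ALLOW_WRITE_DEBUGFS
 * has been defined by editing this file.  Register and value are parsed as
 * hex, separated by whitespace, and any write taints the kernel:
 *
 *   # echo "1c 00ff" > /sys/kernel/debug/regmap/<dev>/registers
 */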

static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};

static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};

static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/* While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/* Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		snprintf(entry, PAGE_SIZE, "%x-%x",
			 c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + 1 + strlen(entry) > count)
				break;
			snprintf(buf + buf_pos, count - buf_pos,
				 "%s", entry);
			buf_pos += strlen(entry);
			buf[buf_pos] = '\n';
			buf_pos++;
		}
		p += strlen(entry) + 1;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};
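
/*
 * Illustrative "range" output: one "base-max" pair (hex register numbers)
 * per contiguous block of printable registers, taken straight from the
 * dump cache built above:
 *
 *   $ cat /sys/kernel/debug/regmap/<dev>/range
 *   0-3
 *   8-1f
 */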

static ssize_t regmap_access_read_file(struct file *file,
				       char __user *user_buf, size_t count,
				       loff_t *ppos)
{
	int reg_len, tot_len;
	size_t buf_pos = 0;
	loff_t p = 0;
	ssize_t ret;
	int i;
	struct regmap *map = file->private_data;
	char *buf;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Calculate the length of a fixed format  */
	reg_len = regmap_calc_reg_len(map->max_register, buf, count);
	tot_len = reg_len + 10; /* ': R W V P\n' */

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + tot_len + 1 >= count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos,
				 "%.*x: %c %c %c %c\n",
				 reg_len, i,
				 regmap_readable(map, i) ? 'y' : 'n',
				 regmap_writeable(map, i) ? 'y' : 'n',
				 regmap_volatile(map, i) ? 'y' : 'n',
				 regmap_precious(map, i) ? 'y' : 'n');

			buf_pos += tot_len;
		}
		p += tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;
out:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_access_fops = {
	.open = simple_open,
	.read = regmap_access_read_file,
	.llseek = default_llseek,
};
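
/*
 * Illustrative "access" output: per register, y/n flags for readable,
 * writeable, volatile and precious, in that order:
 *
 *   $ cat /sys/kernel/debug/regmap/<dev>/access
 *   00: y y n n
 *   01: y n n n
 */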

static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	ssize_t result;
	bool was_enabled, require_sync = false;
	int err;

	map->lock(map->lock_arg);

	was_enabled = map->cache_only;

	result = debugfs_write_file_bool(file, user_buf, count, ppos);
	if (result < 0) {
		map->unlock(map->lock_arg);
		return result;
	}

	if (map->cache_only && !was_enabled) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!map->cache_only && was_enabled) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}

	map->unlock(map->lock_arg);

	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return result;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};
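
/*
 * Illustrative usage: forcing cache-only mode on and then back off.
 * debugfs bool files accept Y/N or 1/0; enabling taints the kernel, and
 * clearing cache_only additionally triggers a regcache_sync():
 *
 *   # echo Y > /sys/kernel/debug/regmap/<dev>/cache_only
 *   # echo N > /sys/kernel/debug/regmap/<dev>/cache_only
 */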

static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	ssize_t result;
	bool was_enabled;

	map->lock(map->lock_arg);

	was_enabled = map->cache_bypass;

	result = debugfs_write_file_bool(file, user_buf, count, ppos);
	if (result < 0)
		goto out;

	if (map->cache_bypass && !was_enabled) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!map->cache_bypass && was_enabled) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}

out:
	map->unlock(map->lock_arg);
	return result;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};

void regmap_debugfs_init(struct regmap *map, const char *name)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		node->name = name;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	if (name) {
		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
					      devname, name);
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
	if (!map->debugfs) {
		dev_warn(map->dev, "Failed to create debugfs directory\n");
		return;
	}

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}
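
/*
 * Resulting debugfs layout for a typical map (illustrative; which entries
 * exist depends on max_register, cache_type, named ranges and the cache
 * backend's own debugfs_init hook):
 *
 *   /sys/kernel/debug/regmap/<dev>[-<name>]/
 *       name  range  registers  access
 *       cache_only  cache_dirty  cache_bypass
 *       <range_node->name> ...   (one file per named register range)
 */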

void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}

void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
	if (!regmap_debugfs_root) {
		pr_warn("regmap: Failed to create debugfs root\n");
		return;
	}

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map, node->name);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}