regcache-rbtree.c

/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
} __attribute__ ((packed));

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

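/*
 * Look up the block covering @reg.  The most recently used block is
 * checked first as a fast path; on a miss we fall back to a standard
 * binary search of the rbtree, caching whichever node is found for
 * the next lookup.
 */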
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = container_of(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

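/*
 * Link a new block into the rbtree, keyed by its base register.  This
 * follows the usual kernel rbtree idiom: walk down to find the parent
 * and link point, then rb_link_node()/rb_insert_color() to rebalance.
 * Returns 1 if the node was inserted, 0 if an existing block already
 * covers its base register.
 */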
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

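/*
 * debugfs support: expose an "rbtree" file which dumps each block's
 * register range and size, plus a summary of how many nodes and
 * registers are cached and how much memory the cache is using.
 */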
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open = rbtree_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif

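/*
 * Allocate the cache context and seed it with the register defaults
 * supplied by the driver; any failure tears the partially built cache
 * down again via regcache_rbtree_exit().
 */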
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

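/*
 * Read a value from the cache.  -ENOENT means the register is not
 * cached, either because no block covers it or because its bit in the
 * block's cache_present bitmap is clear, so the regmap core can fall
 * back to reading the hardware.
 */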
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}

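/*
 * Grow an existing block so that it spans base_reg..top_reg and store
 * @value for @reg inside it.  Both the data block and the
 * cache_present bitmap are krealloc()ed; if the block is being
 * extended downwards (pos == 0) the existing data and present bits
 * are shifted up by @offset to make room at the front.
 */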
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/*
	 * krealloc() may have moved the buffer, so hand it to the node
	 * straight away; keeping the old pointer around would leave it
	 * dangling if the bitmap allocation below fails.
	 */
	rbnode->block = blk;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc(rbnode->cache_present,
				   BITS_TO_LONGS(blklen) * sizeof(*present),
				   GFP_KERNEL);
		if (!present)
			return -ENOMEM;

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block size and the base register */
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

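/*
 * Allocate a new block for @reg.  If the register sits inside one of
 * the driver's readable ranges the whole range is allocated up front,
 * on the guess that its neighbours will be cached too; otherwise the
 * block starts out covering just the one register.
 */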
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
				GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) *
		sizeof(*rbnode->cache_present), GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}

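/*
 * Write a value into the cache.  The fast path updates a block that
 * already covers the register.  Otherwise we scan for a block within
 * max_dist registers and grow it, so small gaps between written
 * registers don't fragment the cache into many tiny nodes; max_dist
 * is derived from the node structure size, which keeps the memory
 * wasted on padding roughly in the order of the per-node bookkeeping
 * overhead.  Failing that, a new block is allocated.
 */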
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				new_base_reg = min(reg, base_reg);
				new_top_reg = max(reg, top_reg);
			} else {
				continue;
			}

			ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode_tmp;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		/* the block index is in units of map->reg_stride */
		regcache_rbtree_set_register(map, rbnode,
					     (reg - rbnode->base_reg) /
					     map->reg_stride, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

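/*
 * Write back every cached register in [min, max] to the hardware.
 * Blocks are visited in ascending order, so we can stop as soon as a
 * block starts past @max; within each block the sync range is clamped
 * to the requested window before being handed to regcache_sync_block().
 */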
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}

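/*
 * Drop registers in [min, max] from the cache.  Only the
 * cache_present bits are cleared; the blocks themselves are kept and
 * simply repopulated by later writes.
 */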
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};