/*
 * Generic on-chip SRAM allocation driver
 *
 * Copyright (C) 2012 Philipp Zabel, Pengutronix
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>

#define SRAM_GRANULARITY	32

struct sram_dev {
	struct gen_pool *pool;
	struct clk *clk;
};

struct sram_reserve {
	struct list_head list;
	u32 start;
	u32 size;
};

static int sram_reserve_cmp(void *priv, struct list_head *a,
			    struct list_head *b)
{
	struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
	struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);

	return ra->start - rb->start;
}

static int sram_probe(struct platform_device *pdev)
{
	void __iomem *virt_base;
	struct sram_dev *sram;
	struct resource *res;
	struct device_node *np = pdev->dev.of_node, *child;
	unsigned long size, cur_start, cur_size;
	struct sram_reserve *rblocks, *block;
	struct list_head reserve_list;
	unsigned int nblocks;
	int ret;

	INIT_LIST_HEAD(&reserve_list);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "found no memory resource\n");
		return -EINVAL;
	}

	size = resource_size(res);

	if (!devm_request_mem_region(&pdev->dev,
			res->start, size, pdev->name)) {
		dev_err(&pdev->dev, "could not request region for resource\n");
		return -EBUSY;
	}
	/* devm_ioremap_wc() returns NULL on failure, not an ERR_PTR */
	virt_base = devm_ioremap_wc(&pdev->dev, res->start, size);
	if (!virt_base)
		return -ENOMEM;
	sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
	if (!sram)
		return -ENOMEM;

	sram->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sram->clk))
		sram->clk = NULL;
	else
		clk_prepare_enable(sram->clk);

	sram->pool = devm_gen_pool_create(&pdev->dev, ilog2(SRAM_GRANULARITY), -1);
	if (!sram->pool)
		return -ENOMEM;

	/*
	 * We need an additional block to mark the end of the memory region
	 * after the reserved blocks from the dt are processed.
	 */
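	/*
	 * For example (hypothetical layout): with a 64 KiB SRAM and a single
	 * reserved child node covering offsets 0x1000-0x1fff, the sorted list
	 * holds { start = 0x1000, size = 0x1000 } followed by the sentinel
	 * { start = 0x10000, size = 0 }, so the loop below exports two chunks
	 * to the pool: 0x0-0x1000 and 0x2000-0x10000.
	 */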
	nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
	rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
	if (!rblocks) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	block = &rblocks[0];
	for_each_available_child_of_node(np, child) {
		struct resource child_res;

		ret = of_address_to_resource(child, 0, &child_res);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"could not get address for node %s\n",
				child->full_name);
			goto err_chunks;
		}

		if (child_res.start < res->start || child_res.end > res->end) {
			dev_err(&pdev->dev,
				"reserved block %s outside the sram area\n",
				child->full_name);
			ret = -EINVAL;
			goto err_chunks;
		}

		block->start = child_res.start - res->start;
		block->size = resource_size(&child_res);
		list_add_tail(&block->list, &reserve_list);

		dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n",
			block->start,
			block->start + block->size);

		block++;
	}

	/* the last chunk marks the end of the region */
	rblocks[nblocks - 1].start = size;
	rblocks[nblocks - 1].size = 0;
	list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);

	list_sort(NULL, &reserve_list, sram_reserve_cmp);

	cur_start = 0;

	list_for_each_entry(block, &reserve_list, list) {
		/* can only happen if sections overlap */
		if (block->start < cur_start) {
			dev_err(&pdev->dev,
				"block at 0x%x starts after current offset 0x%lx\n",
				block->start, cur_start);
			ret = -EINVAL;
			goto err_chunks;
		}

		/* current start is in a reserved block, so continue after it */
		if (block->start == cur_start) {
			cur_start = block->start + block->size;
			continue;
		}

		/*
		 * allocate the space between the current starting
		 * address and the following reserved block, or the
		 * end of the region.
		 */
		cur_size = block->start - cur_start;

		dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n",
			cur_start, cur_start + cur_size);
		ret = gen_pool_add_virt(sram->pool,
				(unsigned long)virt_base + cur_start,
				res->start + cur_start, cur_size, -1);
		if (ret < 0)
			goto err_chunks;

		/* next allocation after this reserved block */
		cur_start = block->start + block->size;
	}

	kfree(rblocks);

	platform_set_drvdata(pdev, sram);

	dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);

	return 0;

err_chunks:
	kfree(rblocks);
err_alloc:
	if (sram->clk)
		clk_disable_unprepare(sram->clk);
	return ret;
}
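
/*
 * A minimal consumer sketch (not part of this driver): once sram_probe()
 * has exported the chunks, a client that holds the struct gen_pool pointer
 * can carve out a buffer with the generic allocator API. How the client
 * looks up the pool is kernel-version dependent and left out here; the
 * 0x100-byte size is only an example.
 *
 *	unsigned long vaddr;
 *	phys_addr_t paddr;
 *
 *	vaddr = gen_pool_alloc(pool, 0x100);		// returns 0 on failure
 *	if (!vaddr)
 *		return -ENOMEM;
 *	paddr = gen_pool_virt_to_phys(pool, vaddr);	// e.g. for DMA users
 *	...
 *	gen_pool_free(pool, vaddr, 0x100);		// free the same size
 */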
static int sram_remove(struct platform_device *pdev)
{
	struct sram_dev *sram = platform_get_drvdata(pdev);

	if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
		dev_dbg(&pdev->dev, "removed while SRAM allocated\n");

	if (sram->clk)
		clk_disable_unprepare(sram->clk);

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{}
};
#endif
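
/*
 * A hypothetical devicetree fragment the "mmio-sram" match above could bind
 * against; addresses, sizes and node names are made up and mirror the worked
 * example in sram_probe(). The ranges property lets the reserved child's reg
 * translate to a CPU address inside the parent region.
 *
 *	sram: sram@5c000000 {
 *		compatible = "mmio-sram";
 *		reg = <0x5c000000 0x10000>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges = <0 0x5c000000 0x10000>;
 *
 *		reserved@1000 {
 *			reg = <0x1000 0x1000>;
 *		};
 *	};
 */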
static struct platform_driver sram_driver = {
	.driver = {
		.name = "sram",
		.of_match_table = of_match_ptr(sram_dt_ids),
	},
	.probe = sram_probe,
	.remove = sram_remove,
};

static int __init sram_init(void)
{
	return platform_driver_register(&sram_driver);
}

postcore_initcall(sram_init);