qe_common.c

/*
 * Common CPM code
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
 *
 * Some parts derived from commproc.c/cpm2_common.c, which is:
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
 * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
 * 2006 (c) MontaVista Software, Inc.
 *      Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <soc/fsl/qe/qe.h>

static struct gen_pool *muram_pool;
static spinlock_t cpm_muram_lock;
static u8 __iomem *muram_vbase;
static phys_addr_t muram_pbase;

struct muram_block {
        struct list_head head;
        unsigned long start;
        int size;
};

static LIST_HEAD(muram_block_list);

/* max address size we deal with */
#define OF_MAX_ADDR_CELLS 4

/*
 * Bias applied to every address handed to genalloc.  A muram offset of 0 is
 * valid, but gen_pool_alloc() uses 0 to signal failure, so pool addresses
 * are shifted up by GENPOOL_OFFSET and shifted back down on allocation.
 */
#define GENPOOL_OFFSET (4096 * 8)
int cpm_muram_init(void)
{
        struct device_node *np;
        struct resource r;
        u32 zero[OF_MAX_ADDR_CELLS] = {};
        resource_size_t max = 0;
        int i = 0;
        int ret = 0;

        if (muram_pbase)
                return 0;

        spin_lock_init(&cpm_muram_lock);
        np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
        if (!np) {
                /* try legacy bindings */
                np = of_find_node_by_name(NULL, "data-only");
                if (!np) {
                        pr_err("Cannot find CPM muram data node\n");
                        ret = -ENODEV;
                        goto out_muram;
                }
        }

        muram_pool = gen_pool_create(0, -1);
        if (!muram_pool) {
                pr_err("Cannot allocate memory pool for CPM/QE muram\n");
                ret = -ENOMEM;
                goto out_muram;
        }

        muram_pbase = of_translate_address(np, zero);
        if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
                pr_err("Cannot translate zero through CPM muram node\n");
                ret = -ENODEV;
                goto out_pool;
        }

        while (of_address_to_resource(np, i++, &r) == 0) {
                if (r.end > max)
                        max = r.end;
                ret = gen_pool_add(muram_pool, r.start - muram_pbase +
                                   GENPOOL_OFFSET, resource_size(&r), -1);
                if (ret) {
                        pr_err("QE: couldn't add muram to pool!\n");
                        goto out_pool;
                }
        }

        muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
        if (!muram_vbase) {
                pr_err("Cannot map QE muram\n");
                ret = -ENOMEM;
                goto out_pool;
        }
        goto out_muram;

out_pool:
        gen_pool_destroy(muram_pool);
out_muram:
        of_node_put(np);
        return ret;
}
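
/*
 * Example (not part of the original file): cpm_muram_init() returns early
 * once muram_pbase is set, so platform setup code may call it explicitly or
 * simply rely on the lazy call from cpm_muram_alloc_common() below.  This is
 * only a sketch; the initcall name is hypothetical.
 */
#if 0
static int __init example_muram_early_init(void)
{
        int ret = cpm_muram_init();

        if (ret)
                pr_warn("CPM/QE muram init failed: %d\n", ret);
        return ret;
}
subsys_initcall(example_muram_early_init);
#endif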
/*
 * cpm_muram_alloc_common - cpm_muram_alloc common code
 * @size: number of bytes to allocate
 * @algo: algorithm for alloc.
 * @data: data for genalloc's algorithm.
 *
 * This function returns an offset into the muram area.
 */
static unsigned long cpm_muram_alloc_common(unsigned long size,
                                            genpool_algo_t algo, void *data)
{
        struct muram_block *entry;
        unsigned long start;

        if (!muram_pool && cpm_muram_init())
                goto out2;

        start = gen_pool_alloc_algo(muram_pool, size, algo, data);
        if (!start)
                goto out2;
        start = start - GENPOOL_OFFSET;
        memset_io(cpm_muram_addr(start), 0, size);
        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                goto out1;
        entry->start = start;
        entry->size = size;
        list_add(&entry->head, &muram_block_list);

        return start;

out1:
        /* The pool works on biased addresses; undo the earlier subtraction. */
        gen_pool_free(muram_pool, start + GENPOOL_OFFSET, size);
out2:
        return (unsigned long)-ENOMEM;
}
/**
 * cpm_muram_alloc - allocate the requested size worth of multi-user ram
 * @size: number of bytes to allocate
 * @align: requested alignment, in bytes
 *
 * This function returns an offset into the muram area.
 * Use cpm_dpram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
{
        unsigned long start;
        unsigned long flags;
        struct genpool_data_align muram_pool_data;

        spin_lock_irqsave(&cpm_muram_lock, flags);
        muram_pool_data.align = align;
        start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
                                       &muram_pool_data);
        spin_unlock_irqrestore(&cpm_muram_lock, flags);
        return start;
}
EXPORT_SYMBOL(cpm_muram_alloc);
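
/*
 * Example (not part of the original file): a minimal sketch of how a CPM/QE
 * driver might pair cpm_muram_alloc(), cpm_muram_addr() and cpm_muram_free()
 * to carve out a buffer-descriptor area.  The function name and sizes are
 * hypothetical; IS_ERR_VALUE() comes from <linux/err.h>.
 */
#if 0
static int example_setup_bd_area(void)
{
        unsigned long offset;
        void __iomem *bd_base;

        /* 64 bytes of muram, 8-byte aligned, zeroed by the allocator */
        offset = cpm_muram_alloc(64, 8);
        if (IS_ERR_VALUE(offset))
                return -ENOMEM;

        /* Convert the muram offset into a CPU-usable virtual address */
        bd_base = cpm_muram_addr(offset);

        /* ... program descriptors via bd_base, hand offset to the CPM ... */

        cpm_muram_free(offset);
        return 0;
}
#endif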
/**
 * cpm_muram_free - free a chunk of multi-user ram
 * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
 */
int cpm_muram_free(unsigned long offset)
{
        unsigned long flags;
        int size;
        struct muram_block *tmp;

        size = 0;
        spin_lock_irqsave(&cpm_muram_lock, flags);
        list_for_each_entry(tmp, &muram_block_list, head) {
                if (tmp->start == offset) {
                        size = tmp->size;
                        list_del(&tmp->head);
                        kfree(tmp);
                        break;
                }
        }
        gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
        spin_unlock_irqrestore(&cpm_muram_lock, flags);
        return size;
}
EXPORT_SYMBOL(cpm_muram_free);
/**
 * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
 * @offset: offset of allocation start address
 * @size: number of bytes to allocate
 *
 * This function returns an offset into the muram area.
 * Use cpm_dpram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
        unsigned long start;
        unsigned long flags;
        struct genpool_data_fixed muram_pool_data_fixed;

        spin_lock_irqsave(&cpm_muram_lock, flags);
        muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
        start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
                                       &muram_pool_data_fixed);
        spin_unlock_irqrestore(&cpm_muram_lock, flags);
        return start;
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);
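
/*
 * Example (not part of the original file): a sketch of reserving a muram
 * region whose offset is dictated by hardware, e.g. a parameter-RAM area.
 * The 0x1c00 offset and 128-byte size are hypothetical values.
 */
#if 0
static int example_reserve_pram(void)
{
        unsigned long offset;

        /* Ask for exactly 128 bytes starting at muram offset 0x1c00 */
        offset = cpm_muram_alloc_fixed(0x1c00, 128);
        if (IS_ERR_VALUE(offset))
                return -ENOMEM;

        /* On success, offset == 0x1c00 and the region is already zeroed */
        return 0;
}
#endif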
/**
 * cpm_muram_addr - turn a muram offset into a virtual address
 * @offset: muram offset to convert
 */
void __iomem *cpm_muram_addr(unsigned long offset)
{
        return muram_vbase + offset;
}
EXPORT_SYMBOL(cpm_muram_addr);

/**
 * cpm_muram_offset - turn a muram virtual address back into a muram offset
 * @addr: virtual address obtained from cpm_muram_addr()
 */
unsigned long cpm_muram_offset(void __iomem *addr)
{
        return addr - (void __iomem *)muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);
/**
 * cpm_muram_dma - turn a muram virtual address into a DMA address
 * @addr: virtual address from cpm_muram_addr() to convert
 */
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
        return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);
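
/*
 * Example (not part of the original file): illustrating how the three
 * address views of one muram location relate.  Purely a sketch; the function
 * name is hypothetical, and WARN_ON()/pr_debug() are only used to make the
 * round-trip visible.
 */
#if 0
static void example_addr_views(unsigned long offset)
{
        void __iomem *virt = cpm_muram_addr(offset);
        dma_addr_t dma = cpm_muram_dma(virt);

        /* cpm_muram_offset() undoes cpm_muram_addr() */
        WARN_ON(cpm_muram_offset(virt) != offset);

        /* dma is the address the CPM/QE DMA engine should be given */
        pr_debug("muram offset 0x%lx: virt %p, dma %pad\n", offset, virt, &dma);
}
#endif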