sram-alloc.c

/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};
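
/*
 * Each SRAM region is managed as a pair of singly linked lists hanging
 * off static list heads: a "free" list of available pieces (kept in
 * ascending address order) and a "used" list of live allocations (kept
 * in descending address order). Allocation is first-fit and freeing
 * coalesces address-adjacent neighbors; see _sram_alloc() and
 * _sram_free() below.
 */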

static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;

/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

#ifdef CONFIG_SMP
	reserve = 0;
#else
	reserve = sizeof(struct l1_scratch_task_info);
#endif

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* initialize the per-cpu lock */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));

		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}

static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	/* initialize the per-cpu lock shared by the A and B lists */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}

static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* initialize the per-cpu lock */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}

#ifdef __ADSPBF60x__
static irqreturn_t l2_ecc_err(int irq, void *dev_id)
{
	int status;

	printk(KERN_ERR "L2 ECC error happened\n");
	status = bfin_read32(L2CTL0_STAT);
	if (status & 0x1)
		printk(KERN_ERR "Core channel error type:0x%x, addr:0x%x\n",
			bfin_read32(L2CTL0_ET0), bfin_read32(L2CTL0_EADDR0));
	if (status & 0x2)
		printk(KERN_ERR "System channel error type:0x%x, addr:0x%x\n",
			bfin_read32(L2CTL0_ET1), bfin_read32(L2CTL0_EADDR1));

	status = status >> 8;
	if (status)
		printk(KERN_ERR "L2 Bank%d error, addr:0x%x\n",
			status, bfin_read32(L2CTL0_ERRADDR0 + status));

	panic("L2 ECC error");
	return IRQ_HANDLED;
}
#endif

static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
#ifdef __ADSPBF60x__
	int ret;

	ret = request_irq(IRQ_L2CTL0_ECC_ERR, l2_ecc_err, 0, "l2-ecc-err",
			NULL);
	if (unlikely(ret < 0)) {
		printk(KERN_INFO "Failed to request L2 ECC error interrupt\n");
		return;
	}
#endif

	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	/* initialize the lock */
	spin_lock_init(&l2_sram_lock);
#endif
}

static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);
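
/*
 * Note: pure_initcall is the earliest of the regular initcall levels,
 * so the SRAM lists above are set up before any driver initcalls that
 * might want on-chip memory get to run.
 */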

/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	/* size is unsigned, so only a zero size is invalid */
	if (size == 0 || !pfree_head || !pused_head)
		return NULL;

	/* align the size to a 4-byte boundary */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* first-fit: take the first free piece that is large enough */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* use atomic so our L1 allocator can be used atomically */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);

		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert the new piece into the used list, which is kept in
	 * descending address order
	 */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
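
/*
 * Usage sketch (illustrative only): the public wrappers below hand this
 * routine the matching free/used list heads under the region's lock.
 * For example, a caller wanting 64 bytes of L1 data SRAM would do:
 *
 *	void *p = l1_data_sram_alloc(64);
 *	if (p)
 *		... use the first-fit, 4-byte-aligned block, then
 *		    release it with l1_data_sram_free(p) ...
 */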

/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
		struct sram_piece *pused_head,
		unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search for the largest free piece */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;

		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}

/* SRAM free function */
static int _sram_free(const void *addr,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search for the piece being freed in the used list */
	pslot = pused_head->next;
	plast = pused_head;

	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert the freed piece back into the free list */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
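
/*
 * Because the free list is kept in ascending address order, the two
 * merge checks above are all the coalescing needed: first merge with
 * the preceding piece if it ends exactly at the freed address, then
 * with the following piece if the (possibly merged) piece ends exactly
 * where the next one starts.
 */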

int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
	return -1;
}
EXPORT_SYMBOL(sram_free);

void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* the per-cpu spinlock protects the free/used lists */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%zx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);

void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%zx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);

void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
	int ret;

	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);

	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);

void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%zx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);

/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* Allocate the largest available block of L1 Scratchpad memory */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return ret;
}

void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%zx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);

int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;
	int ret = -1;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr) {
			lsl = *tmp;
			ret = sram_free(addr);
			*tmp = lsl->next;
			kfree(lsl);
			break;
		}

	return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);

/* Allocate memory and keep it in an L1 SRAM List (lsl) so that the
 * resources are tracked. These are designed for userspace so that when
 * a process exits, we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}

	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;

	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
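
/*
 * Usage sketch (illustrative only): a driver servicing a userspace
 * request can list acceptable regions and rely on the fallback order
 * above (inst, then data A, data B, then L2), e.g.:
 *
 *	void *p = sram_alloc_with_lsl(len, L1_INST_SRAM | L2_SRAM);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	sram_free_with_lsl(p);
 */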

#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Each line of output needs to be the same length. Currently, that is
 * 44 bytes (including the newline).
 */
static int _sram_proc_show(struct seq_file *m, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	seq_printf(m, "--- SRAM %-14s Size   PID State     \n", desc);

	/* walk the used list, then the free list */
	pslot = pused_head->next;
	while (pslot != NULL) {
		seq_printf(m, "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;
	while (pslot != NULL) {
		seq_printf(m, "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}
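
/*
 * Illustrative /proc/sram output for one region (addresses, sizes and
 * PIDs hypothetical; column widths approximate):
 *
 *	--- SRAM Scratchpad      Size   PID State
 *	ffb00000-ffb00100         256   142 ALLOCATED
 *	ffb00100-ffb01000        3840     0 FREE
 */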

static int sram_proc_show(struct seq_file *m, void *v)
{
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_show(m, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_show(m, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_show(m, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_show(m, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_show(m, "L2", &free_l2_sram_head, &used_l2_sram_head))
		goto not_done;
#endif
 not_done:
	return 0;
}

static int sram_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sram_proc_show, NULL);
}

static const struct file_operations sram_proc_ops = {
	.open = sram_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = proc_create("sram", S_IRUGO, NULL, &sram_proc_ops);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	return 0;
}
late_initcall(sram_proc_init);
#endif