/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob@ti.com>
 *         Andy Gross <andy.gross@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"

#define DMM_DRIVER_NAME "dmm"

/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;

#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[];
#endif

/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);

/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
		.x_shft = (xshift), \
		.y_shft = (yshift), \
		.cpp    = (bytes_per_pixel), \
		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
	}

static const struct {
	uint32_t x_shft;	/* unused X-bits (as part of bpp) */
	uint32_t y_shft;	/* unused Y-bits (as part of bpp) */
	uint32_t cpp;		/* bytes/chars per pixel */
	uint32_t slot_w;	/* width of each slot (in pixels) */
	uint32_t slot_h;	/* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
	[TILFMT_8BIT]  = GEOM(0, 0, 1),
	[TILFMT_16BIT] = GEOM(0, 1, 2),
	[TILFMT_32BIT] = GEOM(1, 1, 4),
	[TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
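
/*
 * Worked example (assuming SLOT_WIDTH_BITS == SLOT_HEIGHT_BITS == 6, as
 * defined in omap_dmm_priv.h): TILFMT_8BIT slots are 64x64 pixels,
 * TILFMT_16BIT slots are 64x32 pixels and TILFMT_32BIT slots are 32x32
 * pixels, so a slot always covers 64x64 bytes (one 4KB page) regardless
 * of format.
 */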
/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
	[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
			DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
	[PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};

static u32 dmm_read(struct dmm *dmm, u32 reg)
{
	return readl(dmm->base + reg);
}

static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
	writel(val, dmm->base + reg);
}

/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
	void *ptr;
	struct refill_engine *engine = txn->engine_handle;

	/* dmm programming requires 16 byte aligned addresses */
	txn->current_pa = round_up(txn->current_pa, 16);
	txn->current_va = (void *)round_up((long)txn->current_va, 16);

	ptr = txn->current_va;
	*pa = txn->current_pa;

	txn->current_pa += sz;
	txn->current_va += sz;

	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

	return ptr;
}
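
/*
 * Example: alloc_dma(txn, sizeof(struct pat), &pat_pa) hands out the next
 * 16-byte-aligned chunk of the engine's refill buffer; e.g. a current_pa
 * of 0x1004 is first rounded up to 0x1010 before the chunk is carved out.
 */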
/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
{
	struct dmm *dmm = engine->dmm;
	uint32_t r = 0, err, i;

	i = DMM_FIXED_RETRY_COUNT;
	while (true) {
		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
		err = r & DMM_PATSTATUS_ERR;
		if (err) {
			dev_err(dmm->dev,
				"%s: error (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -EFAULT;
		}

		if ((r & wait_mask) == wait_mask)
			break;

		if (--i == 0) {
			dev_err(dmm->dev,
				"%s: timeout (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -ETIMEDOUT;
		}

		udelay(1);
	}

	return 0;
}
static void release_engine(struct refill_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&engine->idle_node, &omap_dmm->idle_head);
	spin_unlock_irqrestore(&list_lock, flags);

	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);
}
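
/*
 * One interrupt line is shared by all refill engines: DMM_PAT_IRQSTATUS
 * packs one status byte per engine, which is why the handler below shifts
 * the status right by 8 bits on each iteration.
 */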
static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
	struct dmm *dmm = arg;
	uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
	int i;

	/* ack IRQ */
	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);

	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_ERR_MASK)
			dev_err(dmm->dev,
				"irq error(engine%d): IRQSTAT 0x%02x\n",
				i, status & 0xff);

		if (status & DMM_IRQSTAT_LST) {
			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);

			complete(&dmm->engines[i].compl);
		}

		status >>= 8;
	}

	return IRQ_HANDLED;
}
/**
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
	struct dmm_txn *txn = NULL;
	struct refill_engine *engine = NULL;
	int ret;
	unsigned long flags;

	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
					idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	BUG_ON(!engine);

	txn = &engine->txn;
	engine->tcm = tcm;
	txn->engine_handle = engine;
	txn->last_pat = NULL;
	txn->current_va = engine->refill_va;
	txn->current_pa = engine->refill_pa;

	return txn;
}
/**
 * Add region to DMM transaction. If pages or pages[i] is NULL, then the
 * corresponding slot is cleared (ie. dummy_pa is programmed)
 */
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
		struct page **pages, uint32_t npages, uint32_t roll)
{
	dma_addr_t pat_pa = 0, data_pa = 0;
	uint32_t *data;
	struct pat *pat;
	struct refill_engine *engine = txn->engine_handle;
	int columns = (1 + area->x1 - area->x0);
	int rows = (1 + area->y1 - area->y0);
	int i = columns * rows;

	pat = alloc_dma(txn, sizeof(*pat), &pat_pa);

	if (txn->last_pat)
		txn->last_pat->next_pa = (uint32_t)pat_pa;

	pat->area = *area;

	/* adjust Y coordinates based on container parameters */
	pat->area.y0 += engine->tcm->y_offset;
	pat->area.y1 += engine->tcm->y_offset;

	pat->ctrl = (struct pat_ctrl){
			.start = 1,
			.lut_id = engine->tcm->lut_id,
		};

	data = alloc_dma(txn, 4 * i, &data_pa);
	/* FIXME: what if data_pa is more than 32-bit ? */
	pat->data_pa = data_pa;

	while (i--) {
		int n = i + roll;

		if (n >= npages)
			n -= npages;

		data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
	}

	txn->last_pat = pat;
}
/**
 * Commit the DMM transaction.
 */
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
	int ret = 0;
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;

	if (!txn->last_pat) {
		dev_err(engine->dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}

	txn->last_pat->next_pa = 0;

	/* write to PAT_DESCR to clear out any pending transaction */
	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);

	/* wait for engine ready: */
	ret = wait_status(engine, DMM_PATSTATUS_READY);
	if (ret) {
		ret = -EFAULT;
		goto cleanup;
	}

	/* mark whether it is async to denote list management in IRQ handler */
	engine->async = wait ? false : true;
	reinit_completion(&engine->compl);
	/* verify that the irq handler sees the 'async' and completion value */
	smp_mb();

	/* kick reload */
	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

	if (wait) {
		if (!wait_for_completion_timeout(&engine->compl,
				msecs_to_jiffies(100))) {
			dev_err(dmm->dev, "timed out waiting for done\n");
			ret = -ETIMEDOUT;
			goto cleanup;
		}

		/* Check the engine status before continuing */
		ret = wait_status(engine, DMM_PATSTATUS_READY |
				  DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
	}

cleanup:
	/* only place engine back on list if we are done with it */
	if (ret || wait)
		release_engine(engine);

	return ret;
}
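
/*
 * The canonical transaction sequence, as used by fill() below:
 *
 *	txn = dmm_txn_init(omap_dmm, area->tcm);
 *	dmm_txn_append(txn, &p_area, pages, npages, roll);
 *	ret = dmm_txn_commit(txn, wait);
 *
 * dmm_txn_init() claims a refill engine, dmm_txn_append() chains PAT
 * descriptors into that engine's refill buffer, and dmm_txn_commit()
 * points PAT_DESCR at the chain and releases the engine when done.
 */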
/*
 * DMM programming
 */
static int fill(struct tcm_area *area, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret = 0;
	struct tcm_area slice, area_s;
	struct dmm_txn *txn;

	/*
	 * FIXME
	 *
	 * Asynchronous fill does not work reliably, as the driver does not
	 * handle errors in the async code paths. The fill operation may
	 * silently fail, leading to leaking DMM engines, which may eventually
	 * lead to deadlock if we run out of DMM engines.
	 *
	 * For now, always set 'wait' so that we only use sync fills. Async
	 * fills should be fixed, or alternatively we could decide to only
	 * support sync fills and so the whole async code path could be removed.
	 */
	wait = true;

	txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;

	tcm_for_each_slice(slice, *area, area_s) {
		struct pat_area p_area = {
			.x0 = slice.p0.x, .y0 = slice.p0.y,
			.x1 = slice.p1.x, .y1 = slice.p1.y,
		};

		dmm_txn_append(txn, &p_area, pages, npages, roll);

		roll += tcm_sizeof(slice);
	}

	ret = dmm_txn_commit(txn, wait);

	return ret;
}
/*
 * Pin/unpin
 */

/* note: slots for which pages[i] == NULL are filled w/ dummy page */
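
/*
 * Example: pin the backing pages of a previously reserved block into the
 * TILER aperture, then unpin once the mapping is no longer needed:
 *
 *	ret = tiler_pin(block, pages, npages, 0, true);
 *	...
 *	tiler_unpin(block);
 */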
int tiler_pin(struct tiler_block *block, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret;

	ret = fill(&block->area, pages, npages, roll, wait);

	if (ret)
		tiler_unpin(block);

	return ret;
}

int tiler_unpin(struct tiler_block *block)
{
	return fill(&block->area, NULL, 0, 0, false);
}
/*
 * Reserve/release
 */
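
/*
 * Example: reserve a 2D region for a 1920x1080 32-bit buffer with the
 * default alignment (width/height are given in pixels and rounded up to
 * whole slots internally):
 *
 *	block = tiler_reserve_2d(TILFMT_32BIT, 1920, 1080, 0);
 *	if (IS_ERR(block))
 *		return PTR_ERR(block);
 */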
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
		uint16_t h, uint16_t align)
{
	struct tiler_block *block;
	u32 min_align = 128;
	int ret;
	unsigned long flags;
	u32 slot_bytes;

	BUG_ON(!validfmt(fmt));

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return ERR_PTR(-ENOMEM);

	/* convert width/height to slots */
	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
	h = DIV_ROUND_UP(h, geom[fmt].slot_h);

	/* convert alignment to slots */
	slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
	min_align = max(min_align, slot_bytes);
	align = (align > min_align) ? ALIGN(align, min_align) : min_align;
	align /= slot_bytes;

	block->fmt = fmt;

	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
			&block->area);
	if (ret) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	/* add to allocation list */
	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}
struct tiler_block *tiler_reserve_1d(size_t size)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long flags;

	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = TILFMT_PAGE;

	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
				&block->area)) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}
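
/*
 * Example: reserve page-mode (1D) space for a 100KB buffer; the size is
 * rounded up to whole pages, i.e. 25 slots here (assuming 4KB pages):
 *
 *	block = tiler_reserve_1d(100 * 1024);
 */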
/* note: if you have pinned pages, you should have already unpinned first! */
int tiler_release(struct tiler_block *block)
{
	int ret = tcm_free(&block->area);
	unsigned long flags;

	if (block->area.tcm)
		dev_err(omap_dmm->dev, "failed to release block\n");

	spin_lock_irqsave(&list_lock, flags);
	list_del(&block->alloc_node);
	spin_unlock_irqrestore(&list_lock, flags);

	kfree(block);
	return ret;
}
/*
 * Utils
 */

/* calculate the tiler space address of a pixel in a view orientation...
 * below description copied from the display subsystem section of TRM:
 *
 * When the TILER is addressed, the bits:
 *   [28:27] = 0x0 for 8-bit tiled
 *             0x1 for 16-bit tiled
 *             0x2 for 32-bit tiled
 *             0x3 for page mode
 *   [31:29] = 0x0 for 0-degree view
 *             0x1 for 180-degree view + mirroring
 *             0x2 for 0-degree view + mirroring
 *             0x3 for 180-degree view
 *             0x4 for 270-degree view + mirroring
 *             0x5 for 270-degree view
 *             0x6 for 90-degree view
 *             0x7 for 90-degree view + mirroring
 * Otherwise the bits indicate the corresponding bit address to access
 * the SDRAM.
 */
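
/*
 * Worked example (assuming CONT_WIDTH_BITS == 14 and CONT_HEIGHT_BITS == 13,
 * as defined in omap_dmm_priv.h): for a 0-degree 16-bit view (x_shft = 0,
 * y_shft = 1) we get x_bits = 14 and y_bits = 12, so the byte offset is
 * ((y << 14) + x) << 1, with the orientation and format bits then folded
 * into [31:27] by TIL_ADDR().
 */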
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;

	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
	alignment = geom[fmt].x_shft + geom[fmt].y_shft;

	/* validate coordinate (x and y are unsigned, so only check upper bound) */
	x_mask = MASK(x_bits);
	y_mask = MASK(y_bits);

	if (x > x_mask || y > y_mask) {
		DBG("invalid coords: %u > %u || %u > %u",
				x, x_mask, y, y_mask);
		return 0;
	}

	/* account for mirroring */
	if (orient & MASK_X_INVERT)
		x ^= x_mask;
	if (orient & MASK_Y_INVERT)
		y ^= y_mask;

	/* get coordinate address */
	if (orient & MASK_XY_FLIP)
		tmp = ((x << y_bits) + y);
	else
		tmp = ((y << x_bits) + x);

	return TIL_ADDR((tmp << alignment), orient, fmt);
}
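
/*
 * tiler_ssptr() returns the fixed 0-degree system-space address of a
 * block, while tiler_tsptr() returns the address of an (x, y) pixel
 * offset within the block in the requested view orientation.
 */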
dma_addr_t tiler_ssptr(struct tiler_block *block)
{
	BUG_ON(!validfmt(block->fmt));

	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
			block->area.p0.x * geom[block->fmt].slot_w,
			block->area.p0.y * geom[block->fmt].slot_h);
}

dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
		uint32_t x, uint32_t y)
{
	struct tcm_pt *p = &block->area.p0;

	BUG_ON(!validfmt(block->fmt));

	return tiler_get_address(block->fmt, orient,
			(p->x * geom[block->fmt].slot_w) + x,
			(p->y * geom[block->fmt].slot_h) + y);
}
void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
{
	BUG_ON(!validfmt(fmt));
	*w = round_up(*w, geom[fmt].slot_w);
	*h = round_up(*h, geom[fmt].slot_h);
}
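
/*
 * The stride of a tiled view is a fixed power of two that depends only on
 * format and orientation, e.g. (assuming CONT_WIDTH_BITS == 14) a
 * non-flipped 32-bit view has a stride of 1 << (14 + 1) = 32KB.
 */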
uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
{
	BUG_ON(!validfmt(fmt));

	if (orient & MASK_XY_FLIP)
		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
	else
		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}
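
/*
 * tiler_size() is the footprint in TILER space (width and height rounded
 * up to whole slots), while tiler_vsize() is the size of the CPU-visible
 * mapping, where each row is padded out to a page boundary.
 */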
size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	tiler_align(fmt, &w, &h);
	return geom[fmt].cpp * w * h;
}

size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	BUG_ON(!validfmt(fmt));
	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}

uint32_t tiler_get_cpu_cache_flags(void)
{
	return omap_dmm->plat_data->cpu_cache_flags;
}

bool dmm_is_available(void)
{
	return omap_dmm ? true : false;
}
static int omap_dmm_remove(struct platform_device *dev)
{
	struct tiler_block *block, *_block;
	int i;
	unsigned long flags;

	if (omap_dmm) {
		/* free all area regions */
		spin_lock_irqsave(&list_lock, flags);
		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
					alloc_node) {
			list_del(&block->alloc_node);
			kfree(block);
		}
		spin_unlock_irqrestore(&list_lock, flags);

		for (i = 0; i < omap_dmm->num_lut; i++)
			if (omap_dmm->tcm && omap_dmm->tcm[i])
				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
		kfree(omap_dmm->tcm);

		kfree(omap_dmm->engines);
		if (omap_dmm->refill_va)
			dma_free_wc(omap_dmm->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				omap_dmm->refill_va, omap_dmm->refill_pa);
		if (omap_dmm->dummy_page)
			__free_page(omap_dmm->dummy_page);

		if (omap_dmm->irq > 0)
			free_irq(omap_dmm->irq, omap_dmm);

		iounmap(omap_dmm->base);
		kfree(omap_dmm);
		omap_dmm = NULL;
	}

	return 0;
}
static int omap_dmm_probe(struct platform_device *dev)
{
	int ret = -EFAULT, i;
	struct tcm_area area = {0};
	u32 hwinfo, pat_geom;
	struct resource *mem;

	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
	if (!omap_dmm)
		goto fail;

	/* initialize lists */
	INIT_LIST_HEAD(&omap_dmm->alloc_head);
	INIT_LIST_HEAD(&omap_dmm->idle_head);

	init_waitqueue_head(&omap_dmm->engine_queue);

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(dmm_of_match, dev->dev.of_node);
		if (!match) {
			dev_err(&dev->dev, "failed to find matching device node\n");
			ret = -ENODEV;
			goto fail;
		}

		omap_dmm->plat_data = match->data;
	}

	/* lookup hwmod data - base address and irq */
	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&dev->dev, "failed to get base address resource\n");
		goto fail;
	}

	omap_dmm->base = ioremap(mem->start, SZ_2K);
	if (!omap_dmm->base) {
		dev_err(&dev->dev, "failed to get dmm base address\n");
		goto fail;
	}

	omap_dmm->irq = platform_get_irq(dev, 0);
	if (omap_dmm->irq < 0) {
		dev_err(&dev->dev, "failed to get IRQ resource\n");
		goto fail;
	}

	omap_dmm->dev = &dev->dev;

	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
	omap_dmm->container_width = 256;
	omap_dmm->container_height = 128;

	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

	/* read out actual LUT width and height */
	pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;

	/* increment LUT by one if on OMAP5 */
	/* LUT has twice the height, and is split into a separate container */
	if (omap_dmm->lut_height != omap_dmm->container_height)
		omap_dmm->num_lut++;

	/* initialize DMM registers */
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
	dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
	dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);

	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
				"omap_dmm_irq_handler", omap_dmm);
	if (ret) {
		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
			omap_dmm->irq, ret);
		omap_dmm->irq = -1;
		goto fail;
	}

	/* Enable all interrupts for each refill engine except
	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
	 * about because we want to be able to refill live scanout
	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
	 * we just generally don't care about.
	 */
	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);

	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {
		dev_err(&dev->dev, "could not allocate dummy page\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* set dma mask for device */
	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;

	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

	/* alloc refill memory */
	omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				&omap_dmm->refill_pa, GFP_KERNEL);
	if (!omap_dmm->refill_va) {
		dev_err(&dev->dev, "could not allocate refill memory\n");
		goto fail;
	}

	/* alloc engines */
	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
				sizeof(*omap_dmm->engines), GFP_KERNEL);
	if (!omap_dmm->engines) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < omap_dmm->num_engines; i++) {
		omap_dmm->engines[i].id = i;
		omap_dmm->engines[i].dmm = omap_dmm;
		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
						(REFILL_BUFFER_SIZE * i);
		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
						(REFILL_BUFFER_SIZE * i);
		init_completion(&omap_dmm->engines[i].compl);

		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
	}

	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
				GFP_KERNEL);
	if (!omap_dmm->tcm) {
		ret = -ENOMEM;
		goto fail;
	}

	/* init containers */
	/* Each LUT is associated with a TCM (container manager). The lut_id
	 * identifies the correct LUT to program during refill operations.
	 */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
						omap_dmm->container_height);

		if (!omap_dmm->tcm[i]) {
			dev_err(&dev->dev, "failed to allocate container\n");
			ret = -ENOMEM;
			goto fail;
		}

		omap_dmm->tcm[i]->lut_id = i;
	}

	/* assign access mode containers to applicable tcm container */
	/* OMAP 4 has 1 container for all 4 views */
	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
	containers[TILFMT_32BIT] = omap_dmm->tcm[0];

	if (omap_dmm->container_height != omap_dmm->lut_height) {
		/* second LUT is used for PAGE mode. Programming must use
		 * y offset that is added to all y coordinates. LUT id is still
		 * 0, because it is the same LUT, just the upper 128 lines
		 */
		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
		omap_dmm->tcm[1]->lut_id = 0;
	} else {
		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
	}

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed");
	}

	dev_info(omap_dmm->dev, "initialized all PAT entries\n");

	return 0;

fail:
	if (omap_dmm_remove(dev))
		dev_err(&dev->dev, "cleanup failed\n");

	return ret;
}
/*
 * debugfs support
 */

#ifdef CONFIG_DEBUG_FS

static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
				"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";
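
/*
 * Map legend: each 2D block is drawn with a character from 'alphabet',
 * while 1D (page mode) blocks are drawn as runs of '=' delimited by '<'
 * and '>' ('X' where an endpoint overlaps another block).
 */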
static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
			char c, bool ovw)
{
	int x, y;

	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
			if (map[y][x] == ' ' || ovw)
				map[y][x] = c;
}

static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
			char c)
{
	map[p->y / ydiv][p->x / xdiv] = c;
}

static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
	return map[p->y / ydiv][p->x / xdiv];
}

static int map_width(int xdiv, int x0, int x1)
{
	return (x1 / xdiv) - (x0 / xdiv) + 1;
}

static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
	char *p = map[yd] + (x0 / xdiv);
	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;

	if (w >= 0) {
		p += w;
		while (*nice)
			*p++ = *nice++;
	}
}
static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
			struct tcm_area *a)
{
	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
	if (a->p0.y + 1 < a->p1.y) {
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
				256 - 1);
	} else if (a->p0.y < a->p1.y) {
		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
			text_map(map, xdiv, nice, a->p0.y / ydiv,
					a->p0.x + xdiv, 256 - 1);
		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
			text_map(map, xdiv, nice, a->p1.y / ydiv,
					0, a->p1.x - xdiv);
	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
	}
}
static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
			struct tcm_area *a)
{
	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
				a->p0.x, a->p1.x);
}
int tiler_map_show(struct seq_file *s, void *arg)
{
	int xdiv = 2, ydiv = 1;
	char **map = NULL, *global_map;
	struct tiler_block *block;
	struct tcm_area a, p;
	int i;
	const char *m2d = alphabet;
	const char *a2d = special;
	const char *m2dp = m2d, *a2dp = a2d;
	char nice[128];
	int h_adj;
	int w_adj;
	unsigned long flags;
	int lut_idx;

	if (!omap_dmm) {
		/* early return if dmm/tiler device is not initialized */
		return 0;
	}

	h_adj = omap_dmm->container_height / ydiv;
	w_adj = omap_dmm->container_width / xdiv;

	map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
	global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);

	if (!map || !global_map)
		goto error;

	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
		memset(map, 0, h_adj * sizeof(*map));
		memset(global_map, ' ', (w_adj + 1) * h_adj);

		for (i = 0; i < h_adj; i++) {
			map[i] = global_map + i * (w_adj + 1);
			map[i][w_adj] = 0;
		}

		spin_lock_irqsave(&list_lock, flags);

		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
				if (block->fmt != TILFMT_PAGE) {
					fill_map(map, xdiv, ydiv, &block->area,
						*m2dp, true);
					if (!*++a2dp)
						a2dp = a2d;
					if (!*++m2dp)
						m2dp = m2d;
					map_2d_info(map, xdiv, ydiv, nice,
							&block->area);
				} else {
					bool start = read_map_pt(map, xdiv,
						ydiv, &block->area.p0) == ' ';
					bool end = read_map_pt(map, xdiv, ydiv,
							&block->area.p1) == ' ';

					tcm_for_each_slice(a, block->area, p)
						fill_map(map, xdiv, ydiv, &a,
							'=', true);
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p0,
							start ? '<' : 'X');
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p1,
							end ? '>' : 'X');
					map_1d_info(map, xdiv, ydiv, nice,
							&block->area);
				}
			}
		}

		spin_unlock_irqrestore(&list_lock, flags);

		if (s) {
			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
			for (i = 0; i < h_adj; i++)
				seq_printf(s, "%03d:%s\n", i, map[i]);
			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
		} else {
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
				lut_idx);
			for (i = 0; i < h_adj; i++)
				dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
				lut_idx);
		}
	}

error:
	kfree(map);
	kfree(global_map);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
	struct tcm_area area;
	int i;

	if (!omap_dmm)
		return -ENODEV;

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(dev, "refill failed");
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);
#if defined(CONFIG_OF)
static const struct dmm_platform_data dmm_omap4_platform_data = {
	.cpu_cache_flags = OMAP_BO_WC,
};

static const struct dmm_platform_data dmm_omap5_platform_data = {
	.cpu_cache_flags = OMAP_BO_UNCACHED,
};

static const struct of_device_id dmm_of_match[] = {
	{
		.compatible = "ti,omap4-dmm",
		.data = &dmm_omap4_platform_data,
	},
	{
		.compatible = "ti,omap5-dmm",
		.data = &dmm_omap5_platform_data,
	},
	{},
};
#endif

struct platform_driver omap_dmm_driver = {
	.probe = omap_dmm_probe,
	.remove = omap_dmm_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = DMM_DRIVER_NAME,
		.of_match_table = of_match_ptr(dmm_of_match),
		.pm = &omap_dmm_pm_ops,
	},
};

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");