/*
 * Copyright (C) 2012 Russell King
 *  Rewritten from the dovefb driver, and Armada510 manuals.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
#include "armada_plane.h"
#include "armada_trace.h"

/*
 * A note about interlacing.  Let's consider HDMI 1920x1080i.
 * The timing parameters we have from X are:
 *	Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
 *	1920 2448 2492 2640 1080 1084 1094 1125
 * Which get translated to:
 *	Hact HsyA HsyI Htot Vact VsyA VsyI Vtot
 *	1920 2448 2492 2640  540  542  547  562
 *
 * This is how it is defined by CEA-861-D - line and pixel numbers are
 * referenced to the rising edge of VSYNC and HSYNC.  Total clocks per
 * line: 2640.  The odd frame, the first active line is at line 21, and
 * the even frame, the first active line is at line 584.
 *
 * LN:      560     561     562     563             567     568     569
 * DE:      ~~~|____________________________//__________________________
 * HSYNC:   ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
 * VSYNC:   _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
 *  22 blanking lines.  VSYNC at 1320 (referenced to the HSYNC rising edge).
 *
 * LN:      1123    1124    1125      1       5       6       7
 * DE:      ~~~|____________________________//__________________________
 * HSYNC:   ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
 * VSYNC:   ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
 *  23 blanking lines
 *
 * The Armada LCD Controller line and pixel numbers are, like X timings,
 * referenced to the top left of the active frame.
 *
 * So, translating these to our LCD controller:
 *  Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
 *  Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
 * Note: Vsync front porch remains constant!
 *
 * if (odd_frame) {
 *   vtotal = mode->crtc_vtotal + 1;
 *   vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
 *   vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
 * } else {
 *   vtotal = mode->crtc_vtotal;
 *   vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
 *   vhorizpos = mode->crtc_hsync_start;
 * }
 * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
 *
 * So, we need to reprogram these registers on each vsync event:
 *  LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
 *
 * Note: we do not use the frame done interrupts because these appear
 * to happen too early, and lead to jitter on the display (presumably
 * they occur at the end of the last active line, before the vsync back
 * porch, which we're reprogramming.)
 */
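/*
 * Apply a ~0-terminated list of register updates to the LCD controller.
 * Entries with a non-zero mask are read-modify-write (the masked bits of
 * the current register value are preserved); entries with a zero mask
 * are straight writes.
 */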
void
armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
{
        while (regs->offset != ~0) {
                void __iomem *reg = dcrtc->base + regs->offset;
                uint32_t val;

                val = regs->mask;
                if (val != 0)
                        val &= readl_relaxed(reg);
                writel_relaxed(val | regs->val, reg);
                ++regs;
        }
}

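/* Enable the dumb panel output, or blank it on disable where it is safe to. */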
static void armada_drm_crtc_update(struct armada_crtc *dcrtc, bool enable)
{
        uint32_t dumb_ctrl;

        dumb_ctrl = dcrtc->cfg_dumb_ctrl;

        if (enable)
                dumb_ctrl |= CFG_DUMB_ENA;

        /*
         * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
         * be using SPI or GPIO.  If we set this to DUMB_BLANK, we will
         * force LCD_D[23:0] to output blank color, overriding the GPIO or
         * SPI usage.  So leave it as-is unless in DUMB24_RGB888_0 mode.
         */
        if (!enable && (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
                dumb_ctrl &= ~DUMB_MASK;
                dumb_ctrl |= DUMB_BLANK;
        }

        armada_updatel(dumb_ctrl,
                       ~(CFG_INV_CSYNC | CFG_INV_HSYNC | CFG_INV_VSYNC),
                       dcrtc->base + LCD_SPU_DUMB_CTRL);
}

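/*
 * Take ownership of any pending event on the new CRTC state and hold a
 * vblank reference, so the event can be completed from the interrupt
 * handler once the update has taken effect.
 */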
static void armada_drm_crtc_queue_state_event(struct drm_crtc *crtc)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        struct drm_pending_vblank_event *event;

        /* If we have an event, we need vblank events enabled */
        event = xchg(&crtc->state->event, NULL);
        if (event) {
                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
                dcrtc->event = event;
        }
}

/* The mode_config.mutex will be held for this call */
static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
        const struct drm_display_mode *mode, struct drm_display_mode *adj)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        int ret;

        /* We can't do interlaced modes if we don't have the SPU_ADV_REG */
        if (!dcrtc->variant->has_spu_adv_reg &&
            adj->flags & DRM_MODE_FLAG_INTERLACE)
                return false;

        /* Check whether the display mode is possible */
        ret = dcrtc->variant->compute_clock(dcrtc, adj, NULL);
        if (ret)
                return false;

        return true;
}

/* These are locked by dev->vbl_lock */
static void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
{
        if (dcrtc->irq_ena & mask) {
                dcrtc->irq_ena &= ~mask;
                writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
        }
}

static void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
{
        if ((dcrtc->irq_ena & mask) != mask) {
                dcrtc->irq_ena |= mask;
                writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
                if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
                        writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
        }
}

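/*
 * Per-CRTC interrupt work: report FIFO underflows, handle vblank, switch
 * the odd/even field timings for interlaced modes, apply deferred register
 * and cursor updates at frame done, and complete any pending flip event.
 */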
static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
        struct drm_pending_vblank_event *event;
        void __iomem *base = dcrtc->base;

        if (stat & DMA_FF_UNDERFLOW)
                DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
        if (stat & GRA_FF_UNDERFLOW)
                DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);

        if (stat & VSYNC_IRQ)
                drm_crtc_handle_vblank(&dcrtc->crtc);

        spin_lock(&dcrtc->irq_lock);
        if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
                int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
                uint32_t val;

                writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
                writel_relaxed(dcrtc->v[i].spu_v_h_total,
                               base + LCD_SPUT_V_H_TOTAL);

                val = readl_relaxed(base + LCD_SPU_ADV_REG);
                val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
                val |= dcrtc->v[i].spu_adv_reg;
                writel_relaxed(val, base + LCD_SPU_ADV_REG);
        }

        if (stat & dcrtc->irq_ena & DUMB_FRAMEDONE) {
                if (dcrtc->update_pending) {
                        armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
                        dcrtc->update_pending = false;
                }
                if (dcrtc->cursor_update) {
                        writel_relaxed(dcrtc->cursor_hw_pos,
                                       base + LCD_SPU_HWC_OVSA_HPXL_VLN);
                        writel_relaxed(dcrtc->cursor_hw_sz,
                                       base + LCD_SPU_HWC_HPXL_VLN);
                        armada_updatel(CFG_HWC_ENA,
                                       CFG_HWC_ENA | CFG_HWC_1BITMOD |
                                       CFG_HWC_1BITENA,
                                       base + LCD_SPU_DMA_CTRL0);
                        dcrtc->cursor_update = false;
                }
                armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
        }
        spin_unlock(&dcrtc->irq_lock);

        if (stat & VSYNC_IRQ && !dcrtc->update_pending) {
                event = xchg(&dcrtc->event, NULL);
                if (event) {
                        spin_lock(&dcrtc->crtc.dev->event_lock);
                        drm_crtc_send_vblank_event(&dcrtc->crtc, event);
                        spin_unlock(&dcrtc->crtc.dev->event_lock);
                        drm_crtc_vblank_put(&dcrtc->crtc);
                }
        }
}

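/* Top-level LCD controller interrupt handler: acknowledge and dispatch. */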
static irqreturn_t armada_drm_irq(int irq, void *arg)
{
        struct armada_crtc *dcrtc = arg;
        u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);

        /*
         * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
         * is set.  Writing has some other effect to acknowledge the IRQ -
         * without this, we only get a single IRQ.
         */
        writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);

        trace_armada_drm_irq(&dcrtc->crtc, stat);

        /* Mask out those interrupts we haven't enabled */
        v = stat & dcrtc->irq_ena;

        if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
                armada_drm_crtc_irq(dcrtc, stat);
                return IRQ_HANDLED;
        }
        return IRQ_NONE;
}

/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
        struct drm_display_mode *adj = &crtc->state->adjusted_mode;
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        struct armada_regs regs[17];
        uint32_t lm, rm, tm, bm, val, sclk;
        unsigned long flags;
        unsigned i;
        bool interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);

        i = 0;
        rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
        lm = adj->crtc_htotal - adj->crtc_hsync_end;
        bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
        tm = adj->crtc_vtotal - adj->crtc_vsync_end;

        DRM_DEBUG_KMS("[CRTC:%d:%s] mode " DRM_MODE_FMT "\n",
                      crtc->base.id, crtc->name,
                      adj->base.id, adj->name, adj->vrefresh, adj->clock,
                      adj->crtc_hdisplay, adj->crtc_hsync_start,
                      adj->crtc_hsync_end, adj->crtc_htotal,
                      adj->crtc_vdisplay, adj->crtc_vsync_start,
                      adj->crtc_vsync_end, adj->crtc_vtotal,
                      adj->type, adj->flags);
        DRM_DEBUG_KMS("lm %d rm %d tm %d bm %d\n", lm, rm, tm, bm);

        /* Now compute the divider for real */
        dcrtc->variant->compute_clock(dcrtc, adj, &sclk);

        armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);

        if (interlaced ^ dcrtc->interlaced) {
                if (adj->flags & DRM_MODE_FLAG_INTERLACE)
                        drm_crtc_vblank_get(&dcrtc->crtc);
                else
                        drm_crtc_vblank_put(&dcrtc->crtc);
                dcrtc->interlaced = interlaced;
        }

        spin_lock_irqsave(&dcrtc->irq_lock, flags);

        /* Even interlaced/progressive frame */
        dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
                                    adj->crtc_htotal;
        dcrtc->v[1].spu_v_porch = tm << 16 | bm;
        val = adj->crtc_hsync_start;
        dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;

        if (interlaced) {
                /* Odd interlaced frame */
                val -= adj->crtc_htotal / 2;
                dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
                dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
                                            (1 << 16);
                dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
        } else {
                dcrtc->v[0] = dcrtc->v[1];
        }

        val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;

        armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
        armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
        armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
        armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
                             LCD_SPUT_V_H_TOTAL);

        if (dcrtc->variant->has_spu_adv_reg)
                armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
                                     ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
                                     ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);

        val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
        armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);

        /*
         * The documentation doesn't indicate what the normal state of
         * the sync signals are.  Sebastian Hesselbart kindly probed
         * these signals on his board to determine their state.
         *
         * The non-inverted state of the sync signals is active high.
         * Setting these bits makes the appropriate signal active low.
         */
        val = 0;
        if (adj->flags & DRM_MODE_FLAG_NCSYNC)
                val |= CFG_INV_CSYNC;
        if (adj->flags & DRM_MODE_FLAG_NHSYNC)
                val |= CFG_INV_HSYNC;
        if (adj->flags & DRM_MODE_FLAG_NVSYNC)
                val |= CFG_INV_VSYNC;
        armada_reg_queue_mod(regs, i, val, CFG_INV_CSYNC | CFG_INV_HSYNC |
                             CFG_INV_VSYNC, LCD_SPU_DUMB_CTRL);

        armada_reg_queue_end(regs, i);

        armada_drm_crtc_update_regs(dcrtc, regs);

        spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}

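/*
 * Start of an atomic update: reset the register queue which plane updates
 * are accumulated into during this commit.
 */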
static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
        struct drm_crtc_state *old_crtc_state)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);

        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

        dcrtc->regs_idx = 0;
        dcrtc->regs = dcrtc->atomic_regs;
}

static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
        struct drm_crtc_state *old_crtc_state)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);

        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

        armada_reg_queue_end(dcrtc->regs, dcrtc->regs_idx);

        /*
         * If we aren't doing a full modeset, then we need to queue
         * the event here.
         */
        if (!drm_atomic_crtc_needs_modeset(crtc->state)) {
                dcrtc->update_pending = true;
                armada_drm_crtc_queue_state_event(crtc);
                spin_lock_irq(&dcrtc->irq_lock);
                armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
                spin_unlock_irq(&dcrtc->irq_lock);
        } else {
                spin_lock_irq(&dcrtc->irq_lock);
                armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
                spin_unlock_irq(&dcrtc->irq_lock);
        }
}

static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
        struct drm_crtc_state *old_state)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        struct drm_pending_vblank_event *event;

        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

        drm_crtc_vblank_off(crtc);
        armada_drm_crtc_update(dcrtc, false);

        if (!crtc->state->active) {
                /*
                 * This modeset will be leaving the CRTC disabled, so
                 * call the backend to disable upstream clocks etc.
                 */
                if (dcrtc->variant->disable)
                        dcrtc->variant->disable(dcrtc);

                /*
                 * We will not receive any further vblank events.
                 * Send the flip_done event manually.
                 */
                event = crtc->state->event;
                crtc->state->event = NULL;
                if (event) {
                        spin_lock_irq(&crtc->dev->event_lock);
                        drm_crtc_send_vblank_event(crtc, event);
                        spin_unlock_irq(&crtc->dev->event_lock);
                }
        }
}

static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
        struct drm_crtc_state *old_state)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);

        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

        if (!old_state->active) {
                /*
                 * This modeset is enabling the CRTC after it having
                 * been disabled.  Reverse the call to ->disable in
                 * the atomic_disable().
                 */
                if (dcrtc->variant->enable)
                        dcrtc->variant->enable(dcrtc, &crtc->state->adjusted_mode);
        }
        armada_drm_crtc_update(dcrtc, true);
        drm_crtc_vblank_on(crtc);

        armada_drm_crtc_queue_state_event(crtc);
}

static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
        .mode_fixup = armada_drm_crtc_mode_fixup,
        .mode_set_nofb = armada_drm_crtc_mode_set_nofb,
        .atomic_begin = armada_drm_crtc_atomic_begin,
        .atomic_flush = armada_drm_crtc_atomic_flush,
        .atomic_disable = armada_drm_crtc_atomic_disable,
        .atomic_enable = armada_drm_crtc_atomic_enable,
};

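/*
 * Copy an ARGB cursor image into the hardware cursor SRAM, swapping the
 * red and blue channels and advancing from the RAM1 to the RAM2 bank as
 * the SRAM address wraps.
 */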
static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
        unsigned stride, unsigned width, unsigned height)
{
        uint32_t addr;
        unsigned y;

        addr = SRAM_HWC32_RAM1;
        for (y = 0; y < height; y++) {
                uint32_t *p = &pix[y * stride];
                unsigned x;

                for (x = 0; x < width; x++, p++) {
                        uint32_t val = *p;

                        /* Swap the red and blue channels */
                        val = (val & 0xff00ff00) |
                              (val & 0x000000ff) << 16 |
                              (val & 0x00ff0000) >> 16;

                        writel_relaxed(val,
                                       base + LCD_SPU_SRAM_WRDAT);
                        writel_relaxed(addr | SRAM_WRITE,
                                       base + LCD_SPU_SRAM_CTRL);
                        readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN);
                        addr += 1;
                        if ((addr & 0x00ff) == 0)
                                addr += 0xf00;
                        if ((addr & 0x30ff) == 0)
                                addr = SRAM_HWC32_RAM2;
                }
        }
}

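/* Program all 256 entries of the cursor transparency SRAM with the default value. */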
static void armada_drm_crtc_cursor_tran(void __iomem *base)
{
        unsigned addr;

        for (addr = 0; addr < 256; addr++) {
                /* write the default value */
                writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
                writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
                               base + LCD_SPU_SRAM_CTRL);
        }
}

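/*
 * Recompute the visible portion of the cursor (clipped against the screen
 * edges), reload the cursor image into SRAM if needed, and schedule the new
 * position, size and enable to be applied from the frame-done interrupt.
 */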
static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
{
        uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
        uint32_t yoff, yscr, h = dcrtc->cursor_h;
        uint32_t para1;

        /*
         * Calculate the visible width and height of the cursor,
         * screen position, and the position in the cursor bitmap.
         */
        if (dcrtc->cursor_x < 0) {
                xoff = -dcrtc->cursor_x;
                xscr = 0;
                w -= min(xoff, w);
        } else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
                xoff = 0;
                xscr = dcrtc->cursor_x;
                w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
        } else {
                xoff = 0;
                xscr = dcrtc->cursor_x;
        }
        if (dcrtc->cursor_y < 0) {
                yoff = -dcrtc->cursor_y;
                yscr = 0;
                h -= min(yoff, h);
        } else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
                yoff = 0;
                yscr = dcrtc->cursor_y;
                h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
        } else {
                yoff = 0;
                yscr = dcrtc->cursor_y;
        }

        /* On interlaced modes, the vertical cursor size must be halved */
        s = dcrtc->cursor_w;
        if (dcrtc->interlaced) {
                s *= 2;
                yscr /= 2;
                h /= 2;
        }

        if (!dcrtc->cursor_obj || !h || !w) {
                spin_lock_irq(&dcrtc->irq_lock);
                dcrtc->cursor_update = false;
                armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
                spin_unlock_irq(&dcrtc->irq_lock);
                return 0;
        }

        spin_lock_irq(&dcrtc->irq_lock);
        para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
        armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
                       dcrtc->base + LCD_SPU_SRAM_PARA1);
        spin_unlock_irq(&dcrtc->irq_lock);

        /*
         * Initialize the transparency if the SRAM was powered down.
         * We must also reload the cursor data as well.
         */
        if (!(para1 & CFG_CSB_256x32)) {
                armada_drm_crtc_cursor_tran(dcrtc->base);
                reload = true;
        }

        if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
                spin_lock_irq(&dcrtc->irq_lock);
                dcrtc->cursor_update = false;
                armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
                spin_unlock_irq(&dcrtc->irq_lock);
                reload = true;
        }

        if (reload) {
                struct armada_gem_object *obj = dcrtc->cursor_obj;
                uint32_t *pix;

                /* Set the top-left corner of the cursor image */
                pix = obj->addr;
                pix += yoff * s + xoff;
                armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
        }

        /* Reload the cursor position, size and enable in the IRQ handler */
        spin_lock_irq(&dcrtc->irq_lock);
        dcrtc->cursor_hw_pos = yscr << 16 | xscr;
        dcrtc->cursor_hw_sz = h << 16 | w;
        dcrtc->cursor_update = true;
        armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
        spin_unlock_irq(&dcrtc->irq_lock);

        return 0;
}

static void cursor_update(void *data)
{
        armada_drm_crtc_cursor_update(data, true);
}

static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
        struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        struct armada_gem_object *obj = NULL;
        int ret;

        /* If no cursor support, replicate drm's return value */
        if (!dcrtc->variant->has_spu_adv_reg)
                return -ENXIO;

        if (handle && w > 0 && h > 0) {
                /* maximum size is 64x32 or 32x64 */
                if (w > 64 || h > 64 || (w > 32 && h > 32))
                        return -ENOMEM;

                obj = armada_gem_object_lookup(file, handle);
                if (!obj)
                        return -ENOENT;

                /* Must be a kernel-mapped object */
                if (!obj->addr) {
                        drm_gem_object_put_unlocked(&obj->obj);
                        return -EINVAL;
                }

                if (obj->obj.size < w * h * 4) {
                        DRM_ERROR("buffer is too small\n");
                        drm_gem_object_put_unlocked(&obj->obj);
                        return -ENOMEM;
                }
        }

        if (dcrtc->cursor_obj) {
                dcrtc->cursor_obj->update = NULL;
                dcrtc->cursor_obj->update_data = NULL;
                drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj);
        }
        dcrtc->cursor_obj = obj;
        dcrtc->cursor_w = w;
        dcrtc->cursor_h = h;
        ret = armada_drm_crtc_cursor_update(dcrtc, true);
        if (obj) {
                obj->update_data = dcrtc;
                obj->update = cursor_update;
        }

        return ret;
}

static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        int ret;

        /* If no cursor support, replicate drm's return value */
        if (!dcrtc->variant->has_spu_adv_reg)
                return -EFAULT;

        dcrtc->cursor_x = x;
        dcrtc->cursor_y = y;
        ret = armada_drm_crtc_cursor_update(dcrtc, false);

        return ret;
}

static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        struct armada_private *priv = crtc->dev->dev_private;

        if (dcrtc->cursor_obj)
                drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj);

        priv->dcrtc[dcrtc->num] = NULL;
        drm_crtc_cleanup(&dcrtc->crtc);

        if (dcrtc->variant->disable)
                dcrtc->variant->disable(dcrtc);

        writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);

        of_node_put(dcrtc->crtc.port);

        kfree(dcrtc);
}

/* These are called under the vbl_lock. */
static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        unsigned long flags;

        spin_lock_irqsave(&dcrtc->irq_lock, flags);
        armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
        spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
        return 0;
}

static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        unsigned long flags;

        spin_lock_irqsave(&dcrtc->irq_lock, flags);
        armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
        spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}

static const struct drm_crtc_funcs armada_crtc_funcs = {
        .reset = drm_atomic_helper_crtc_reset,
        .cursor_set = armada_drm_crtc_cursor_set,
        .cursor_move = armada_drm_crtc_cursor_move,
        .destroy = armada_drm_crtc_destroy,
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
        .enable_vblank = armada_drm_crtc_enable_vblank,
        .disable_vblank = armada_drm_crtc_disable_vblank,
};

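/*
 * Allocate and initialise a CRTC: map the registers, program safe initial
 * register values, request the interrupt, run any variant init, and create
 * the primary and overlay planes.
 */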
static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
        struct resource *res, int irq, const struct armada_variant *variant,
        struct device_node *port)
{
        struct armada_private *priv = drm->dev_private;
        struct armada_crtc *dcrtc;
        struct drm_plane *primary;
        void __iomem *base;
        int ret;

        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
        if (!dcrtc) {
                DRM_ERROR("failed to allocate Armada crtc\n");
                return -ENOMEM;
        }

        if (dev != drm->dev)
                dev_set_drvdata(dev, dcrtc);

        dcrtc->variant = variant;
        dcrtc->base = base;
        dcrtc->num = drm->mode_config.num_crtc;
        dcrtc->clk = ERR_PTR(-EINVAL);
        dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
        dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
        spin_lock_init(&dcrtc->irq_lock);
        dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;

        /* Initialize some registers which we don't otherwise set */
        writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
        writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
        writel_relaxed(dcrtc->spu_iopad_ctrl,
                       dcrtc->base + LCD_SPU_IOPAD_CONTROL);
        writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
        writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
                       CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
                       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
        writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
        writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
        readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
        writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);

        ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
                               dcrtc);
        if (ret < 0)
                goto err_crtc;

        if (dcrtc->variant->init) {
                ret = dcrtc->variant->init(dcrtc, dev);
                if (ret)
                        goto err_crtc;
        }

        /* Ensure AXI pipeline is enabled */
        armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);

        priv->dcrtc[dcrtc->num] = dcrtc;

        dcrtc->crtc.port = port;

        primary = kzalloc(sizeof(*primary), GFP_KERNEL);
        if (!primary) {
                ret = -ENOMEM;
                goto err_crtc;
        }

        ret = armada_drm_primary_plane_init(drm, primary);
        if (ret) {
                kfree(primary);
                goto err_crtc;
        }

        ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, primary, NULL,
                                        &armada_crtc_funcs, NULL);
        if (ret)
                goto err_crtc_init;

        drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);

        return armada_overlay_plane_create(drm, 1 << dcrtc->num);

err_crtc_init:
        primary->funcs->destroy(primary);
err_crtc:
        kfree(dcrtc);

        return ret;
}

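/*
 * Component bind callback: pick up the MMIO resource, interrupt and variant
 * (from either the OF match or the platform device ID), locate the port
 * node, and create the CRTC.
 */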
static int
armada_lcd_bind(struct device *dev, struct device *master, void *data)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct drm_device *drm = data;
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int irq = platform_get_irq(pdev, 0);
        const struct armada_variant *variant;
        struct device_node *port = NULL;

        if (irq < 0)
                return irq;

        if (!dev->of_node) {
                const struct platform_device_id *id;

                id = platform_get_device_id(pdev);
                if (!id)
                        return -ENXIO;

                variant = (const struct armada_variant *)id->driver_data;
        } else {
                const struct of_device_id *match;
                struct device_node *np, *parent = dev->of_node;

                match = of_match_device(dev->driver->of_match_table, dev);
                if (!match)
                        return -ENXIO;

                np = of_get_child_by_name(parent, "ports");
                if (np)
                        parent = np;
                port = of_get_child_by_name(parent, "port");
                of_node_put(np);
                if (!port) {
                        dev_err(dev, "no port node found in %pOF\n", parent);
                        return -ENXIO;
                }

                variant = match->data;
        }

        return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
}

static void
armada_lcd_unbind(struct device *dev, struct device *master, void *data)
{
        struct armada_crtc *dcrtc = dev_get_drvdata(dev);

        armada_drm_crtc_destroy(&dcrtc->crtc);
}

static const struct component_ops armada_lcd_ops = {
        .bind = armada_lcd_bind,
        .unbind = armada_lcd_unbind,
};

static int armada_lcd_probe(struct platform_device *pdev)
{
        return component_add(&pdev->dev, &armada_lcd_ops);
}

static int armada_lcd_remove(struct platform_device *pdev)
{
        component_del(&pdev->dev, &armada_lcd_ops);
        return 0;
}

static const struct of_device_id armada_lcd_of_match[] = {
        {
                .compatible     = "marvell,dove-lcd",
                .data           = &armada510_ops,
        },
        {}
};
MODULE_DEVICE_TABLE(of, armada_lcd_of_match);

static const struct platform_device_id armada_lcd_platform_ids[] = {
        {
                .name           = "armada-lcd",
                .driver_data    = (unsigned long)&armada510_ops,
        }, {
                .name           = "armada-510-lcd",
                .driver_data    = (unsigned long)&armada510_ops,
        },
        { },
};
MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);

struct platform_driver armada_lcd_platform_driver = {
        .probe  = armada_lcd_probe,
        .remove = armada_lcd_remove,
        .driver = {
                .name   = "armada-lcd",
                .owner  = THIS_MODULE,
                .of_match_table = armada_lcd_of_match,
        },
        .id_table = armada_lcd_platform_ids,
};