mtk_dsi.c

/*
 * Copyright (c) 2015 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <video/mipi_display.h>
#include <video/videomode.h>

#include "mtk_drm_ddp_comp.h"

#define DSI_START 0x00
#define DSI_INTEN 0x08
#define DSI_INTSTA 0x0c
#define LPRX_RD_RDY_INT_FLAG BIT(0)
#define CMD_DONE_INT_FLAG BIT(1)
#define TE_RDY_INT_FLAG BIT(2)
#define VM_DONE_INT_FLAG BIT(3)
#define EXT_TE_RDY_INT_FLAG BIT(4)
#define DSI_BUSY BIT(31)
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
#define DSI_MODE_CTRL 0x14
#define MODE (3)
#define CMD_MODE 0
#define SYNC_PULSE_MODE 1
#define SYNC_EVENT_MODE 2
#define BURST_MODE 3
#define FRM_MODE BIT(16)
#define MIX_MODE BIT(17)
#define DSI_TXRX_CTRL 0x18
#define VC_NUM BIT(1)
#define LANE_NUM (0xf << 2)
#define DIS_EOT BIT(6)
#define NULL_EN BIT(7)
#define TE_FREERUN BIT(8)
#define EXT_TE_EN BIT(9)
#define EXT_TE_EDGE BIT(10)
#define MAX_RTN_SIZE (0xf << 12)
#define HSTX_CKLP_EN BIT(16)
#define DSI_PSCTRL 0x1c
#define DSI_PS_WC 0x3fff
#define DSI_PS_SEL (3 << 16)
#define PACKED_PS_16BIT_RGB565 (0 << 16)
#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
#define PACKED_PS_18BIT_RGB666 (2 << 16)
#define PACKED_PS_24BIT_RGB888 (3 << 16)
#define DSI_VSA_NL 0x20
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
#define DSI_CMDQ_SIZE 0x60
#define CMDQ_SIZE 0x3f
#define DSI_HSTX_CKL_WC 0x64
#define DSI_RX_DATA0 0x74
#define DSI_RX_DATA1 0x78
#define DSI_RX_DATA2 0x7c
#define DSI_RX_DATA3 0x80
#define DSI_RACK 0x84
#define RACK BIT(0)
#define DSI_PHY_LCCON 0x104
#define LC_HS_TX_EN BIT(0)
#define LC_ULPM_EN BIT(1)
#define LC_WAKEUP_EN BIT(2)
#define DSI_PHY_LD0CON 0x108
#define LD0_HS_TX_EN BIT(0)
#define LD0_ULPM_EN BIT(1)
#define LD0_WAKEUP_EN BIT(2)
#define DSI_PHY_TIMECON0 0x110
#define LPX (0xff << 0)
#define HS_PREP (0xff << 8)
#define HS_ZERO (0xff << 16)
#define HS_TRAIL (0xff << 24)
#define DSI_PHY_TIMECON1 0x114
#define TA_GO (0xff << 0)
#define TA_SURE (0xff << 8)
#define TA_GET (0xff << 16)
#define DA_HS_EXIT (0xff << 24)
#define DSI_PHY_TIMECON2 0x118
#define CONT_DET (0xff << 0)
#define CLK_ZERO (0xff << 16)
#define CLK_TRAIL (0xff << 24)
#define DSI_PHY_TIMECON3 0x11c
#define CLK_HS_PREP (0xff << 0)
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)
#define DSI_VM_CMD_CON 0x130
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
#define DSI_CMDQ0 0x180
#define CONFIG (0xff << 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
#define BTA BIT(2)
#define DATA_ID (0xff << 8)
#define DATA_0 (0xff << 16)
#define DATA_1 (0xff << 24)
#define T_LPX 5
#define T_HS_PREP 6
#define T_HS_TRAIL 8
#define T_HS_EXIT 7
#define T_HS_ZERO 10

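/* Ceiling division: the number of whole cycles of length 'c' needed to cover 'n'. */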
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))

#define MTK_DSI_HOST_IS_READ(type) \
	((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
	 (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
	 (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
	 (type == MIPI_DSI_DCS_READ))

struct phy;

struct mtk_dsi {
	struct mtk_ddp_comp ddp_comp;
	struct device *dev;
	struct mipi_dsi_host host;
	struct drm_encoder encoder;
	struct drm_connector conn;
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	struct phy *phy;

	void __iomem *regs;

	struct clk *engine_clk;
	struct clk *digital_clk;
	struct clk *hs_clk;

	u32 data_rate;

	unsigned long mode_flags;
	enum mipi_dsi_pixel_format format;
	unsigned int lanes;
	struct videomode vm;
	int refcount;
	bool enabled;
	u32 irq_data;
	wait_queue_head_t irq_wait_queue;
};

static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
{
	return container_of(e, struct mtk_dsi, encoder);
}

static inline struct mtk_dsi *connector_to_dsi(struct drm_connector *c)
{
	return container_of(c, struct mtk_dsi, conn);
}

static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
{
	return container_of(h, struct mtk_dsi, host);
}

static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
{
	u32 temp = readl(dsi->regs + offset);

	writel((temp & ~mask) | (data & mask), dsi->regs + offset);
}

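/*
 * Program the D-PHY timing registers (TIMECON0..3) from the fixed T_*
 * cycle counts defined above; the remaining terms are nanosecond values
 * converted to cycles with NS_TO_CYCLE() using the cycle time derived
 * from the current data rate.
 */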
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
	u32 timcon0, timcon1, timcon2, timcon3;
	u32 ui, cycle_time;

	ui = 1000 / dsi->data_rate + 0x01;
	cycle_time = 8000 / dsi->data_rate + 0x01;

	timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
	timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
		  T_HS_EXIT << 24;
	timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
		  (NS_TO_CYCLE(0x150, cycle_time) << 16);
	timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
		  NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;

	writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
	writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
	writel(timcon2, dsi->regs + DSI_PHY_TIMECON2);
	writel(timcon3, dsi->regs + DSI_PHY_TIMECON3);
}

static void mtk_dsi_enable(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN);
}

static void mtk_dsi_disable(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}

static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}

static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}

static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}

static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
	u32 tmp_reg1;

	tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);

	return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
}

static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
	if (enter && !mtk_dsi_clk_hs_state(dsi))
		mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
	else if (!enter && mtk_dsi_clk_hs_state(dsi))
		mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
}

static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
{
	u32 vid_mode = CMD_MODE;

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
			vid_mode = BURST_MODE;
		else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
			vid_mode = SYNC_PULSE_MODE;
		else
			vid_mode = SYNC_EVENT_MODE;
	}

	writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
}

static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
{
	struct videomode *vm = &dsi->vm;
	u32 dsi_buf_bpp, ps_wc;
	u32 ps_bpp_mode;

	if (dsi->format == MIPI_DSI_FMT_RGB565)
		dsi_buf_bpp = 2;
	else
		dsi_buf_bpp = 3;

	ps_wc = vm->hactive * dsi_buf_bpp;
	ps_bpp_mode = ps_wc;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB888:
		ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
		break;
	case MIPI_DSI_FMT_RGB666:
		ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
		break;
	case MIPI_DSI_FMT_RGB565:
		ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
		break;
	}

	writel(vm->vactive, dsi->regs + DSI_VACT_NL);
	writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
	writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}

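/*
 * Set up DSI_TXRX_CTRL: enable one bit per active data lane in the
 * LANE_NUM field, and shift the MIPI_DSI_CLOCK_NON_CONTINUOUS and
 * MIPI_DSI_MODE_EOT_PACKET mode flags into the corresponding control bits.
 */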
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
	u32 tmp_reg;

	switch (dsi->lanes) {
	case 1:
		tmp_reg = 1 << 2;
		break;
	case 2:
		tmp_reg = 3 << 2;
		break;
	case 3:
		tmp_reg = 7 << 2;
		break;
	case 4:
		tmp_reg = 0xf << 2;
		break;
	default:
		tmp_reg = 0xf << 2;
		break;
	}

	tmp_reg |= (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) << 6;
	tmp_reg |= (dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET) >> 3;

	writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}

static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
	u32 dsi_tmp_buf_bpp;
	u32 tmp_reg;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB888:
		tmp_reg = PACKED_PS_24BIT_RGB888;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB666:
		tmp_reg = LOOSELY_PS_18BIT_RGB666;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		tmp_reg = PACKED_PS_18BIT_RGB666;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB565:
		tmp_reg = PACKED_PS_16BIT_RGB565;
		dsi_tmp_buf_bpp = 2;
		break;
	default:
		tmp_reg = PACKED_PS_24BIT_RGB888;
		dsi_tmp_buf_bpp = 3;
		break;
	}

	tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
	writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}

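/*
 * Program the video-mode timing registers: vertical timings are written as
 * line counts, horizontal timings as word counts in bytes (porch width
 * times bytes per pixel, minus a small allowance for packet overhead).
 */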
static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
	u32 horizontal_sync_active_byte;
	u32 horizontal_backporch_byte;
	u32 horizontal_frontporch_byte;
	u32 dsi_tmp_buf_bpp;

	struct videomode *vm = &dsi->vm;

	if (dsi->format == MIPI_DSI_FMT_RGB565)
		dsi_tmp_buf_bpp = 2;
	else
		dsi_tmp_buf_bpp = 3;

	writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
	writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
	writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
	writel(vm->vactive, dsi->regs + DSI_VACT_NL);

	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		horizontal_backporch_byte =
			(vm->hback_porch * dsi_tmp_buf_bpp - 10);
	else
		horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
			dsi_tmp_buf_bpp - 10);

	horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);

	writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
	writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
	writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);

	mtk_dsi_ps_control(dsi);
}

static void mtk_dsi_start(struct mtk_dsi *dsi)
{
	writel(0, dsi->regs + DSI_START);
	writel(1, dsi->regs + DSI_START);
}

static void mtk_dsi_stop(struct mtk_dsi *dsi)
{
	writel(0, dsi->regs + DSI_START);
}

static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
{
	writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
{
	u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

	writel(inten, dsi->regs + DSI_INTEN);
}

static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
{
	dsi->irq_data |= irq_bit;
}

static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
{
	dsi->irq_data &= ~irq_bit;
}

static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
				     unsigned int timeout)
{
	s32 ret = 0;
	unsigned long jiffies = msecs_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
					       dsi->irq_data & irq_flag,
					       jiffies);
	if (ret == 0) {
		DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag);

		mtk_dsi_enable(dsi);
		mtk_dsi_reset_engine(dsi);
	}

	return ret;
}

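/*
 * Interrupt handler: acknowledge pending RX with a RACK pulse until the
 * engine is no longer busy, clear the handled status bits, record them in
 * irq_data and wake up any waiter in mtk_dsi_wait_for_irq_done().
 */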
static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
{
	struct mtk_dsi *dsi = dev_id;
	u32 status, tmp;
	u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

	status = readl(dsi->regs + DSI_INTSTA) & flag;

	if (status) {
		do {
			mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
			tmp = readl(dsi->regs + DSI_INTSTA);
		} while (tmp & DSI_BUSY);

		mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
		mtk_dsi_irq_data_set(dsi, status);
		wake_up_interruptible(&dsi->irq_wait_queue);
	}

	return IRQ_HANDLED;
}

static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
{
	mtk_dsi_irq_data_clear(dsi, irq_flag);
	mtk_dsi_set_cmd_mode(dsi);

	if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
		DRM_ERROR("failed to switch cmd mode\n");
		return -ETIME;
	} else {
		return 0;
	}
}

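/*
 * Power-up is refcounted: only the first caller actually sets the HS clock
 * rate, powers on the D-PHY, enables the clocks and programs the controller;
 * later callers just take a reference. mtk_dsi_poweroff() undoes this when
 * the last reference is dropped.
 */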
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
	struct device *dev = dsi->dev;
	int ret;
	u64 pixel_clock, total_bits;
	u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;

	if (++dsi->refcount != 1)
		return 0;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB565:
		bit_per_pixel = 16;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		bit_per_pixel = 18;
		break;
	case MIPI_DSI_FMT_RGB666:
	case MIPI_DSI_FMT_RGB888:
	default:
		bit_per_pixel = 24;
		break;
	}

	/**
	 * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
	 * htotal_time = htotal * byte_per_pixel / num_lanes
	 * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
	 * mipi_ratio = (htotal_time + overhead_time) / htotal_time
	 * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
	 */
	pixel_clock = dsi->vm.pixelclock * 1000;
	htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
			dsi->vm.hsync_len;
	htotal_bits = htotal * bit_per_pixel;

	overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
			T_HS_EXIT;
	overhead_bits = overhead_cycles * dsi->lanes * 8;
	total_bits = htotal_bits + overhead_bits;

	dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
					  htotal * dsi->lanes);

	ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
	if (ret < 0) {
		dev_err(dev, "Failed to set data rate: %d\n", ret);
		goto err_refcount;
	}

	phy_power_on(dsi->phy);

	ret = clk_prepare_enable(dsi->engine_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable engine clock: %d\n", ret);
		goto err_phy_power_off;
	}

	ret = clk_prepare_enable(dsi->digital_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable digital clock: %d\n", ret);
		goto err_disable_engine_clk;
	}

	mtk_dsi_enable(dsi);
	mtk_dsi_reset_engine(dsi);
	mtk_dsi_phy_timconfig(dsi);

	mtk_dsi_rxtx_control(dsi);
	mtk_dsi_ps_control_vact(dsi);
	mtk_dsi_set_vm_cmd(dsi);
	mtk_dsi_config_vdo_timing(dsi);
	mtk_dsi_set_interrupt_enable(dsi);

	mtk_dsi_clk_ulp_mode_leave(dsi);
	mtk_dsi_lane0_ulp_mode_leave(dsi);
	mtk_dsi_clk_hs_mode(dsi, 0);

	if (dsi->panel) {
		if (drm_panel_prepare(dsi->panel)) {
			DRM_ERROR("failed to prepare the panel\n");
			goto err_disable_digital_clk;
		}
	}

	return 0;

err_disable_digital_clk:
	clk_disable_unprepare(dsi->digital_clk);
err_disable_engine_clk:
	clk_disable_unprepare(dsi->engine_clk);
err_phy_power_off:
	phy_power_off(dsi->phy);
err_refcount:
	dsi->refcount--;
	return ret;
}

static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
	if (WARN_ON(dsi->refcount == 0))
		return;

	if (--dsi->refcount != 0)
		return;

	if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
		if (dsi->panel) {
			if (drm_panel_unprepare(dsi->panel)) {
				DRM_ERROR("failed to unprepare the panel\n");
				return;
			}
		}
	}

	mtk_dsi_reset_engine(dsi);
	mtk_dsi_lane0_ulp_mode_enter(dsi);
	mtk_dsi_clk_ulp_mode_enter(dsi);

	mtk_dsi_disable(dsi);

	clk_disable_unprepare(dsi->engine_clk);
	clk_disable_unprepare(dsi->digital_clk);

	phy_power_off(dsi->phy);
}

static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
{
	int ret;

	if (dsi->enabled)
		return;

	ret = mtk_dsi_poweron(dsi);
	if (ret < 0) {
		DRM_ERROR("failed to power on dsi\n");
		return;
	}

	mtk_dsi_set_mode(dsi);
	mtk_dsi_clk_hs_mode(dsi, 1);

	mtk_dsi_start(dsi);

	if (dsi->panel) {
		if (drm_panel_enable(dsi->panel)) {
			DRM_ERROR("failed to enable the panel\n");
			goto err_dsi_power_off;
		}
	}

	dsi->enabled = true;

	return;

err_dsi_power_off:
	mtk_dsi_stop(dsi);
	mtk_dsi_poweroff(dsi);
}

static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
{
	if (!dsi->enabled)
		return;

	if (dsi->panel) {
		if (drm_panel_disable(dsi->panel)) {
			DRM_ERROR("failed to disable the panel\n");
			return;
		}
	}

	mtk_dsi_stop(dsi);
	mtk_dsi_poweroff(dsi);

	dsi->enabled = false;
}

static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
	.destroy = mtk_dsi_encoder_destroy,
};

static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
				       const struct drm_display_mode *mode,
				       struct drm_display_mode *adjusted_mode)
{
	return true;
}

static void mtk_dsi_encoder_mode_set(struct drm_encoder *encoder,
				     struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted)
{
	struct mtk_dsi *dsi = encoder_to_dsi(encoder);

	dsi->vm.pixelclock = adjusted->clock;
	dsi->vm.hactive = adjusted->hdisplay;
	dsi->vm.hback_porch = adjusted->htotal - adjusted->hsync_end;
	dsi->vm.hfront_porch = adjusted->hsync_start - adjusted->hdisplay;
	dsi->vm.hsync_len = adjusted->hsync_end - adjusted->hsync_start;

	dsi->vm.vactive = adjusted->vdisplay;
	dsi->vm.vback_porch = adjusted->vtotal - adjusted->vsync_end;
	dsi->vm.vfront_porch = adjusted->vsync_start - adjusted->vdisplay;
	dsi->vm.vsync_len = adjusted->vsync_end - adjusted->vsync_start;
}

static void mtk_dsi_encoder_disable(struct drm_encoder *encoder)
{
	struct mtk_dsi *dsi = encoder_to_dsi(encoder);

	mtk_output_dsi_disable(dsi);
}

static void mtk_dsi_encoder_enable(struct drm_encoder *encoder)
{
	struct mtk_dsi *dsi = encoder_to_dsi(encoder);

	mtk_output_dsi_enable(dsi);
}

static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
{
	struct mtk_dsi *dsi = connector_to_dsi(connector);

	return drm_panel_get_modes(dsi->panel);
}

static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
	.mode_fixup = mtk_dsi_encoder_mode_fixup,
	.mode_set = mtk_dsi_encoder_mode_set,
	.disable = mtk_dsi_encoder_disable,
	.enable = mtk_dsi_encoder_enable,
};

static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_connector_helper_funcs
	mtk_dsi_connector_helper_funcs = {
	.get_modes = mtk_dsi_connector_get_modes,
};

static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
{
	int ret;

	ret = drm_connector_init(drm, &dsi->conn, &mtk_dsi_connector_funcs,
				 DRM_MODE_CONNECTOR_DSI);
	if (ret) {
		DRM_ERROR("Failed to initialize connector\n");
		return ret;
	}

	drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);

	dsi->conn.dpms = DRM_MODE_DPMS_OFF;
	drm_mode_connector_attach_encoder(&dsi->conn, &dsi->encoder);

	if (dsi->panel) {
		ret = drm_panel_attach(dsi->panel, &dsi->conn);
		if (ret) {
			DRM_ERROR("Failed to attach panel to drm\n");
			goto err_connector_cleanup;
		}
	}

	return 0;

err_connector_cleanup:
	drm_connector_cleanup(&dsi->conn);
	return ret;
}

static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
{
	int ret;

	ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
			       DRM_MODE_ENCODER_DSI, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize encoder\n");
		return ret;
	}

	drm_encoder_helper_add(&dsi->encoder, &mtk_dsi_encoder_helper_funcs);

	/*
	 * Currently display data paths are statically assigned to a crtc each.
	 * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
	 */
	dsi->encoder.possible_crtcs = 1;

	/* If there's a bridge, attach to it and let it create the connector */
	ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
	if (ret) {
		DRM_ERROR("Failed to attach bridge to drm\n");

		/* Otherwise create our own connector and attach to a panel */
		ret = mtk_dsi_create_connector(drm, dsi);
		if (ret)
			goto err_encoder_cleanup;
	}

	return 0;

err_encoder_cleanup:
	drm_encoder_cleanup(&dsi->encoder);
	return ret;
}

static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
{
	drm_encoder_cleanup(&dsi->encoder);

	/* Skip connector cleanup if creation was delegated to the bridge */
	if (dsi->conn.dev)
		drm_connector_cleanup(&dsi->conn);
}

static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
{
	struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);

	mtk_dsi_poweron(dsi);
}

static void mtk_dsi_ddp_stop(struct mtk_ddp_comp *comp)
{
	struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);

	mtk_dsi_poweroff(dsi);
}

static const struct mtk_ddp_comp_funcs mtk_dsi_funcs = {
	.start = mtk_dsi_ddp_start,
	.stop = mtk_dsi_ddp_stop,
};

static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
			       struct mipi_dsi_device *device)
{
	struct mtk_dsi *dsi = host_to_dsi(host);

	dsi->lanes = device->lanes;
	dsi->format = device->format;
	dsi->mode_flags = device->mode_flags;

	if (dsi->conn.dev)
		drm_helper_hpd_irq_event(dsi->conn.dev);

	return 0;
}

static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
			       struct mipi_dsi_device *device)
{
	struct mtk_dsi *dsi = host_to_dsi(host);

	if (dsi->conn.dev)
		drm_helper_hpd_irq_event(dsi->conn.dev);

	return 0;
}

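/* Poll DSI_INTSTA until the engine clears DSI_BUSY, or reset it on timeout. */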
static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
	u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */

	while (timeout_ms) {
		if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
			break;

		usleep_range(2, 4);
		timeout_ms--;
	}

	if (timeout_ms == 0) {
		DRM_WARN("polling dsi wait not busy timeout!\n");

		mtk_dsi_enable(dsi);
		mtk_dsi_reset_engine(dsi);
	}
}

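/*
 * Return the payload length encoded in a received packet header: fixed
 * sizes for short read responses, the word-count bytes for long read
 * responses, and 0 for anything else.
 */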
static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
{
	switch (type) {
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		return 1;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		return 2;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		return read_data[1] + read_data[2] * 16;
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		DRM_INFO("type is 0x02, try again\n");
		break;
	default:
		DRM_INFO("type(0x%x) is not recognized\n", type);
		break;
	}

	return 0;
}

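/*
 * Build a command-queue entry for the message: the first word packs the
 * config byte (BTA for reads, short/long packet for writes), the data
 * type and, for long packets, the word count. Payload bytes are written
 * into the queue at cmdq_off, and DSI_CMDQ_SIZE is updated to match.
 */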
static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
{
	const char *tx_buf = msg->tx_buf;
	u8 config, cmdq_size, cmdq_off, type = msg->type;
	u32 reg_val, cmdq_mask, i;

	if (MTK_DSI_HOST_IS_READ(type))
		config = BTA;
	else
		config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;

	if (msg->tx_len > 2) {
		cmdq_size = 1 + (msg->tx_len + 3) / 4;
		cmdq_off = 4;
		cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
		reg_val = (msg->tx_len << 16) | (type << 8) | config;
	} else {
		cmdq_size = 1;
		cmdq_off = 2;
		cmdq_mask = CONFIG | DATA_ID;
		reg_val = (type << 8) | config;
	}

	for (i = 0; i < msg->tx_len; i++)
		writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);

	mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
	mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}

static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
				     const struct mipi_dsi_msg *msg, u8 flag)
{
	mtk_dsi_wait_for_idle(dsi);
	mtk_dsi_irq_data_clear(dsi, flag);
	mtk_dsi_cmdq(dsi, msg);
	mtk_dsi_start(dsi);

	if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
		return -ETIME;
	else
		return 0;
}

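/*
 * mipi_dsi_host transfer hook. Transfers are only allowed while the engine
 * is in command mode. For reads, the RX FIFO is copied out after the
 * command completes; at most 10 bytes of read data are returned.
 */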
static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
				     const struct mipi_dsi_msg *msg)
{
	struct mtk_dsi *dsi = host_to_dsi(host);
	u32 recv_cnt, i;
	u8 read_data[16];
	void *src_addr;
	u8 irq_flag = CMD_DONE_INT_FLAG;

	if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) {
		DRM_ERROR("dsi engine is not in command mode\n");
		return -EINVAL;
	}

	if (MTK_DSI_HOST_IS_READ(msg->type))
		irq_flag |= LPRX_RD_RDY_INT_FLAG;

	if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0)
		return -ETIME;

	if (!MTK_DSI_HOST_IS_READ(msg->type))
		return 0;

	if (!msg->rx_buf) {
		DRM_ERROR("dsi receive buffer is NULL\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);

	recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);

	if (recv_cnt > 2)
		src_addr = &read_data[4];
	else
		src_addr = &read_data[1];

	if (recv_cnt > 10)
		recv_cnt = 10;

	if (recv_cnt > msg->rx_len)
		recv_cnt = msg->rx_len;

	if (recv_cnt)
		memcpy(msg->rx_buf, src_addr, recv_cnt);

	DRM_INFO("dsi got %d bytes of data from panel address 0x%x\n",
		 recv_cnt, *((u8 *)(msg->tx_buf)));

	return recv_cnt;
}

static const struct mipi_dsi_host_ops mtk_dsi_ops = {
	.attach = mtk_dsi_host_attach,
	.detach = mtk_dsi_host_detach,
	.transfer = mtk_dsi_host_transfer,
};

static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
	int ret;
	struct drm_device *drm = data;
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
	if (ret < 0) {
		dev_err(dev, "Failed to register component %s: %d\n",
			dev->of_node->full_name, ret);
		return ret;
	}

	ret = mipi_dsi_host_register(&dsi->host);
	if (ret < 0) {
		dev_err(dev, "failed to register DSI host: %d\n", ret);
		goto err_ddp_comp_unregister;
	}

	ret = mtk_dsi_create_conn_enc(drm, dsi);
	if (ret) {
		DRM_ERROR("Encoder create failed with %d\n", ret);
		goto err_unregister;
	}

	return 0;

err_unregister:
	mipi_dsi_host_unregister(&dsi->host);
err_ddp_comp_unregister:
	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
	return ret;
}

static void mtk_dsi_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = data;
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	mtk_dsi_destroy_conn_enc(dsi);
	mipi_dsi_host_unregister(&dsi->host);
	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}

static const struct component_ops mtk_dsi_component_ops = {
	.bind = mtk_dsi_bind,
	.unbind = mtk_dsi_unbind,
};

static int mtk_dsi_probe(struct platform_device *pdev)
{
	struct mtk_dsi *dsi;
	struct device *dev = &pdev->dev;
	struct resource *regs;
	int irq_num;
	int comp_id;
	int ret;

	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	dsi->host.ops = &mtk_dsi_ops;
	dsi->host.dev = dev;

	ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
					  &dsi->panel, &dsi->bridge);
	if (ret)
		return ret;

	dsi->engine_clk = devm_clk_get(dev, "engine");
	if (IS_ERR(dsi->engine_clk)) {
		ret = PTR_ERR(dsi->engine_clk);
		dev_err(dev, "Failed to get engine clock: %d\n", ret);
		return ret;
	}

	dsi->digital_clk = devm_clk_get(dev, "digital");
	if (IS_ERR(dsi->digital_clk)) {
		ret = PTR_ERR(dsi->digital_clk);
		dev_err(dev, "Failed to get digital clock: %d\n", ret);
		return ret;
	}

	dsi->hs_clk = devm_clk_get(dev, "hs");
	if (IS_ERR(dsi->hs_clk)) {
		ret = PTR_ERR(dsi->hs_clk);
		dev_err(dev, "Failed to get hs clock: %d\n", ret);
		return ret;
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dsi->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(dsi->regs)) {
		ret = PTR_ERR(dsi->regs);
		dev_err(dev, "Failed to ioremap memory: %d\n", ret);
		return ret;
	}

	dsi->phy = devm_phy_get(dev, "dphy");
	if (IS_ERR(dsi->phy)) {
		ret = PTR_ERR(dsi->phy);
		dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
		return ret;
	}

	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
	if (comp_id < 0) {
		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
		return comp_id;
	}

	ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
				&mtk_dsi_funcs);
	if (ret) {
		dev_err(dev, "Failed to initialize component: %d\n", ret);
		return ret;
	}

	irq_num = platform_get_irq(pdev, 0);
	if (irq_num < 0) {
		dev_err(&pdev->dev, "failed to request dsi irq resource\n");
		return -EPROBE_DEFER;
	}

	irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
	ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
			       IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
	if (ret) {
		dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
		return -EPROBE_DEFER;
	}

	init_waitqueue_head(&dsi->irq_wait_queue);

	platform_set_drvdata(pdev, dsi);

	return component_add(&pdev->dev, &mtk_dsi_component_ops);
}

static int mtk_dsi_remove(struct platform_device *pdev)
{
	struct mtk_dsi *dsi = platform_get_drvdata(pdev);

	mtk_output_dsi_disable(dsi);
	component_del(&pdev->dev, &mtk_dsi_component_ops);

	return 0;
}

static const struct of_device_id mtk_dsi_of_match[] = {
	{ .compatible = "mediatek,mt2701-dsi" },
	{ .compatible = "mediatek,mt8173-dsi" },
	{ },
};

struct platform_driver mtk_dsi_driver = {
	.probe = mtk_dsi_probe,
	.remove = mtk_dsi_remove,
	.driver = {
		.name = "mtk-dsi",
		.of_match_table = mtk_dsi_of_match,
	},
};