mtk_dsi.c

/*
 * Copyright (c) 2015 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <video/mipi_display.h>
#include <video/videomode.h>

#include "mtk_drm_ddp_comp.h"

#define DSI_START 0x00

#define DSI_INTEN 0x08

#define DSI_INTSTA 0x0c
#define LPRX_RD_RDY_INT_FLAG BIT(0)
#define CMD_DONE_INT_FLAG BIT(1)
#define TE_RDY_INT_FLAG BIT(2)
#define VM_DONE_INT_FLAG BIT(3)
#define EXT_TE_RDY_INT_FLAG BIT(4)
#define DSI_BUSY BIT(31)

#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)

#define DSI_MODE_CTRL 0x14
#define MODE (3)
#define CMD_MODE 0
#define SYNC_PULSE_MODE 1
#define SYNC_EVENT_MODE 2
#define BURST_MODE 3
#define FRM_MODE BIT(16)
#define MIX_MODE BIT(17)

#define DSI_TXRX_CTRL 0x18
#define VC_NUM BIT(1)
#define LANE_NUM (0xf << 2)
#define DIS_EOT BIT(6)
#define NULL_EN BIT(7)
#define TE_FREERUN BIT(8)
#define EXT_TE_EN BIT(9)
#define EXT_TE_EDGE BIT(10)
#define MAX_RTN_SIZE (0xf << 12)
#define HSTX_CKLP_EN BIT(16)

#define DSI_PSCTRL 0x1c
#define DSI_PS_WC 0x3fff
#define DSI_PS_SEL (3 << 16)
#define PACKED_PS_16BIT_RGB565 (0 << 16)
#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
#define PACKED_PS_18BIT_RGB666 (2 << 16)
#define PACKED_PS_24BIT_RGB888 (3 << 16)

#define DSI_VSA_NL 0x20
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58

#define DSI_CMDQ_SIZE 0x60
#define CMDQ_SIZE 0x3f

#define DSI_HSTX_CKL_WC 0x64

#define DSI_RX_DATA0 0x74
#define DSI_RX_DATA1 0x78
#define DSI_RX_DATA2 0x7c
#define DSI_RX_DATA3 0x80

#define DSI_RACK 0x84
#define RACK BIT(0)

#define DSI_PHY_LCCON 0x104
#define LC_HS_TX_EN BIT(0)
#define LC_ULPM_EN BIT(1)
#define LC_WAKEUP_EN BIT(2)

#define DSI_PHY_LD0CON 0x108
#define LD0_HS_TX_EN BIT(0)
#define LD0_ULPM_EN BIT(1)
#define LD0_WAKEUP_EN BIT(2)

#define DSI_PHY_TIMECON0 0x110
#define LPX (0xff << 0)
#define HS_PREP (0xff << 8)
#define HS_ZERO (0xff << 16)
#define HS_TRAIL (0xff << 24)

#define DSI_PHY_TIMECON1 0x114
#define TA_GO (0xff << 0)
#define TA_SURE (0xff << 8)
#define TA_GET (0xff << 16)
#define DA_HS_EXIT (0xff << 24)

#define DSI_PHY_TIMECON2 0x118
#define CONT_DET (0xff << 0)
#define CLK_ZERO (0xff << 16)
#define CLK_TRAIL (0xff << 24)

#define DSI_PHY_TIMECON3 0x11c
#define CLK_HS_PREP (0xff << 0)
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)

#define DSI_VM_CMD_CON 0x130
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)

#define DSI_CMDQ0 0x180
#define CONFIG (0xff << 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
#define BTA BIT(2)
#define DATA_ID (0xff << 8)
#define DATA_0 (0xff << 16)
#define DATA_1 (0xff << 24)

#define T_LPX 5
#define T_HS_PREP 6
#define T_HS_TRAIL 8
#define T_HS_EXIT 7
#define T_HS_ZERO 10
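
/*
 * NS_TO_CYCLE() rounds n up to a whole number of c-sized cycles, e.g.
 * NS_TO_CYCLE(0x40, 25) == 3 for a 25 ns cycle time (illustrative numbers
 * only).
 */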
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))

#define MTK_DSI_HOST_IS_READ(type) \
        ((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
         (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
         (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
         (type == MIPI_DSI_DCS_READ))

struct phy;

struct mtk_dsi {
        struct mtk_ddp_comp ddp_comp;
        struct device *dev;
        struct mipi_dsi_host host;
        struct drm_encoder encoder;
        struct drm_connector conn;
        struct drm_panel *panel;
        struct drm_bridge *bridge;
        struct phy *phy;

        void __iomem *regs;

        struct clk *engine_clk;
        struct clk *digital_clk;
        struct clk *hs_clk;

        u32 data_rate;

        unsigned long mode_flags;
        enum mipi_dsi_pixel_format format;
        unsigned int lanes;
        struct videomode vm;
        int refcount;
        bool enabled;
        u32 irq_data;
        wait_queue_head_t irq_wait_queue;
};

static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
{
        return container_of(e, struct mtk_dsi, encoder);
}

static inline struct mtk_dsi *connector_to_dsi(struct drm_connector *c)
{
        return container_of(c, struct mtk_dsi, conn);
}

static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
{
        return container_of(h, struct mtk_dsi, host);
}
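
/*
 * Read-modify-write helper: update only the bits selected by @mask in the
 * register at @offset, leaving all other bits untouched. For example,
 * mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN) sets DSI_EN without
 * clobbering DSI_RESET.
 */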
static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
{
        u32 temp = readl(dsi->regs + offset);

        writel((temp & ~mask) | (data & mask), dsi->regs + offset);
}
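
/*
 * Program the D-PHY timing registers (LPX, HS-PREPARE/ZERO/TRAIL, TA-*,
 * CLK-*) from fixed cycle counts and the configured lane rate. ui and
 * cycle_time are intended as the bit period and byte-clock period (in ns)
 * implied by dsi->data_rate.
 */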
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
        u32 timcon0, timcon1, timcon2, timcon3;
        u32 ui, cycle_time;

        ui = 1000 / dsi->data_rate + 0x01;
        cycle_time = 8000 / dsi->data_rate + 0x01;

        timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
        timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
                  T_HS_EXIT << 24;
        timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
                  (NS_TO_CYCLE(0x150, cycle_time) << 16);
        timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
                  NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;

        writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
        writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
        writel(timcon2, dsi->regs + DSI_PHY_TIMECON2);
        writel(timcon3, dsi->regs + DSI_PHY_TIMECON3);
}

static void mtk_dsi_enable(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN);
}

static void mtk_dsi_disable(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}

static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
        mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
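
/*
 * ULPS (ultra-low-power state) helpers for the clock lane and data lane 0.
 * The enter helpers first take the lane out of HS transmission; the leave
 * helpers clear the ULPM enable bit and then pulse the WAKEUP bit.
 */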
static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
        mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}

static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
        mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
        mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
        mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
        mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
        mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}

static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
        u32 tmp_reg1;

        tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);

        return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
}

static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
        if (enter && !mtk_dsi_clk_hs_state(dsi))
                mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
        else if (!enter && mtk_dsi_clk_hs_state(dsi))
                mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
}
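
/*
 * Map the negotiated mode flags onto the hardware transfer mode: command
 * mode by default, or burst / sync-pulse / sync-event video mode when
 * MIPI_DSI_MODE_VIDEO is set.
 */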
static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
{
        u32 vid_mode = CMD_MODE;

        if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
                if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
                        vid_mode = BURST_MODE;
                else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
                        vid_mode = SYNC_PULSE_MODE;
                else
                        vid_mode = SYNC_EVENT_MODE;
        }

        writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
        mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
        mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
}
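
/*
 * Configure the pixel stream for video mode: the word count is
 * hactive * bytes-per-pixel, and DSI_PSCTRL additionally selects the RGB
 * packing that matches dsi->format. The same word count is used for the
 * HSTX clock-lane low-power word count register.
 */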
static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
{
        struct videomode *vm = &dsi->vm;
        u32 dsi_buf_bpp, ps_wc;
        u32 ps_bpp_mode;

        if (dsi->format == MIPI_DSI_FMT_RGB565)
                dsi_buf_bpp = 2;
        else
                dsi_buf_bpp = 3;

        ps_wc = vm->hactive * dsi_buf_bpp;
        ps_bpp_mode = ps_wc;

        switch (dsi->format) {
        case MIPI_DSI_FMT_RGB888:
                ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
                break;
        case MIPI_DSI_FMT_RGB666:
                /* MIPI_DSI_FMT_RGB666 is the loosely packed (24-bit) variant */
                ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
                break;
        case MIPI_DSI_FMT_RGB666_PACKED:
                ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
                break;
        case MIPI_DSI_FMT_RGB565:
                ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
                break;
        }

        writel(vm->vactive, dsi->regs + DSI_VACT_NL);
        writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
        writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}
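
/*
 * DSI_TXRX_CTRL: set one lane-enable bit per active data lane and propagate
 * the non-continuous-clock and EOT-packet mode flags into the HSTX_CKLP_EN
 * and DIS_EOT bit positions.
 */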
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
        u32 tmp_reg;

        switch (dsi->lanes) {
        case 1:
                tmp_reg = 1 << 2;
                break;
        case 2:
                tmp_reg = 3 << 2;
                break;
        case 3:
                tmp_reg = 7 << 2;
                break;
        case 4:
                tmp_reg = 0xf << 2;
                break;
        default:
                tmp_reg = 0xf << 2;
                break;
        }

        tmp_reg |= (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) << 6;
        tmp_reg |= (dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET) >> 3;

        writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}

static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
        u32 dsi_tmp_buf_bpp;
        u32 tmp_reg;

        switch (dsi->format) {
        case MIPI_DSI_FMT_RGB888:
                tmp_reg = PACKED_PS_24BIT_RGB888;
                dsi_tmp_buf_bpp = 3;
                break;
        case MIPI_DSI_FMT_RGB666:
                tmp_reg = LOOSELY_PS_18BIT_RGB666;
                dsi_tmp_buf_bpp = 3;
                break;
        case MIPI_DSI_FMT_RGB666_PACKED:
                tmp_reg = PACKED_PS_18BIT_RGB666;
                dsi_tmp_buf_bpp = 3;
                break;
        case MIPI_DSI_FMT_RGB565:
                tmp_reg = PACKED_PS_16BIT_RGB565;
                dsi_tmp_buf_bpp = 2;
                break;
        default:
                tmp_reg = PACKED_PS_24BIT_RGB888;
                dsi_tmp_buf_bpp = 3;
                break;
        }

        tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
        writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}
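
/*
 * Program the video-mode timing: vertical parameters are written in lines,
 * while HSA/HBP/HFP are written as byte counts (pixels * bytes-per-pixel,
 * minus a small constant that appears to account for packet overhead).
 */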
static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
        u32 horizontal_sync_active_byte;
        u32 horizontal_backporch_byte;
        u32 horizontal_frontporch_byte;
        u32 dsi_tmp_buf_bpp;
        struct videomode *vm = &dsi->vm;

        if (dsi->format == MIPI_DSI_FMT_RGB565)
                dsi_tmp_buf_bpp = 2;
        else
                dsi_tmp_buf_bpp = 3;

        writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
        writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
        writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
        writel(vm->vactive, dsi->regs + DSI_VACT_NL);

        horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);

        if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
                horizontal_backporch_byte =
                        (vm->hback_porch * dsi_tmp_buf_bpp - 10);
        else
                horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
                        dsi_tmp_buf_bpp - 10);

        horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);

        writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
        writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
        writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);

        mtk_dsi_ps_control(dsi);
}

static void mtk_dsi_start(struct mtk_dsi *dsi)
{
        writel(0, dsi->regs + DSI_START);
        writel(1, dsi->regs + DSI_START);
}

static void mtk_dsi_stop(struct mtk_dsi *dsi)
{
        writel(0, dsi->regs + DSI_START);
}

static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
{
        writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
{
        u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

        writel(inten, dsi->regs + DSI_INTEN);
}

static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
{
        dsi->irq_data |= irq_bit;
}

static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
{
        dsi->irq_data &= ~irq_bit;
}
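
/*
 * Interrupt handshake: the ISR latches fired status bits into dsi->irq_data
 * and wakes irq_wait_queue; callers clear the bit they care about, kick the
 * hardware, then sleep in mtk_dsi_wait_for_irq_done() until the bit shows up
 * or the timeout expires.
 */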
static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
                                     unsigned int timeout)
{
        s32 ret = 0;
        unsigned long jiffies = msecs_to_jiffies(timeout);

        ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
                                               dsi->irq_data & irq_flag,
                                               jiffies);
        if (ret == 0) {
                DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag);

                mtk_dsi_enable(dsi);
                mtk_dsi_reset_engine(dsi);
        }

        return ret;
}

static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
{
        struct mtk_dsi *dsi = dev_id;
        u32 status, tmp;
        u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

        status = readl(dsi->regs + DSI_INTSTA) & flag;

        if (status) {
                do {
                        mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
                        tmp = readl(dsi->regs + DSI_INTSTA);
                } while (tmp & DSI_BUSY);

                mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
                mtk_dsi_irq_data_set(dsi, status);
                wake_up_interruptible(&dsi->irq_wait_queue);
        }

        return IRQ_HANDLED;
}

static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
{
        mtk_dsi_irq_data_clear(dsi, irq_flag);
        mtk_dsi_set_cmd_mode(dsi);

        if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
                DRM_ERROR("failed to switch cmd mode\n");
                return -ETIME;
        } else {
                return 0;
        }
}
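
/*
 * Power-on is refcounted: only the first caller computes the per-lane data
 * rate from the current video mode, sets the HS clock, powers up the PHY,
 * enables the engine/digital clocks, programs the controller and prepares
 * the attached panel, if any. mtk_dsi_poweroff() undoes this when the last
 * reference is dropped.
 */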
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
        struct device *dev = dsi->dev;
        int ret;
        u64 pixel_clock, total_bits;
        u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;

        if (++dsi->refcount != 1)
                return 0;

        switch (dsi->format) {
        case MIPI_DSI_FMT_RGB565:
                bit_per_pixel = 16;
                break;
        case MIPI_DSI_FMT_RGB666_PACKED:
                bit_per_pixel = 18;
                break;
        case MIPI_DSI_FMT_RGB666:
        case MIPI_DSI_FMT_RGB888:
        default:
                bit_per_pixel = 24;
                break;
        }

        /**
         * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
         * htotal_time = htotal * byte_per_pixel / num_lanes
         * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
         * mipi_ratio = (htotal_time + overhead_time) / htotal_time
         * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
         */
        pixel_clock = dsi->vm.pixelclock * 1000;
        htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
                 dsi->vm.hsync_len;
        htotal_bits = htotal * bit_per_pixel;
        overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
                          T_HS_EXIT;
        overhead_bits = overhead_cycles * dsi->lanes * 8;
        total_bits = htotal_bits + overhead_bits;

        dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
                                          htotal * dsi->lanes);
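
        /*
         * Illustrative example (assumed numbers, not from this driver): for a
         * 1920x1080@60 mode with a 148.5 MHz pixel clock, htotal = 2200,
         * RGB888 (24 bpp) and 4 lanes, htotal_bits = 52800 and
         * overhead_bits = 36 * 4 * 8 = 1152, giving a per-lane rate of
         * 148500000 * 53952 / (2200 * 4) = 910440000, i.e. ~910 Mbit/s.
         */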
        ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
        if (ret < 0) {
                dev_err(dev, "Failed to set data rate: %d\n", ret);
                goto err_refcount;
        }

        phy_power_on(dsi->phy);

        ret = clk_prepare_enable(dsi->engine_clk);
        if (ret < 0) {
                dev_err(dev, "Failed to enable engine clock: %d\n", ret);
                goto err_phy_power_off;
        }

        ret = clk_prepare_enable(dsi->digital_clk);
        if (ret < 0) {
                dev_err(dev, "Failed to enable digital clock: %d\n", ret);
                goto err_disable_engine_clk;
        }

        mtk_dsi_enable(dsi);
        mtk_dsi_reset_engine(dsi);
        mtk_dsi_phy_timconfig(dsi);

        mtk_dsi_rxtx_control(dsi);
        mtk_dsi_ps_control_vact(dsi);
        mtk_dsi_set_vm_cmd(dsi);
        mtk_dsi_config_vdo_timing(dsi);
        mtk_dsi_set_interrupt_enable(dsi);

        mtk_dsi_clk_ulp_mode_leave(dsi);
        mtk_dsi_lane0_ulp_mode_leave(dsi);
        mtk_dsi_clk_hs_mode(dsi, 0);

        if (dsi->panel) {
                if (drm_panel_prepare(dsi->panel)) {
                        DRM_ERROR("failed to prepare the panel\n");
                        goto err_disable_digital_clk;
                }
        }

        return 0;
err_disable_digital_clk:
        clk_disable_unprepare(dsi->digital_clk);
err_disable_engine_clk:
        clk_disable_unprepare(dsi->engine_clk);
err_phy_power_off:
        phy_power_off(dsi->phy);
err_refcount:
        dsi->refcount--;
        return ret;
}

static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
        if (WARN_ON(dsi->refcount == 0))
                return;

        if (--dsi->refcount != 0)
                return;

        if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
                if (dsi->panel) {
                        if (drm_panel_unprepare(dsi->panel)) {
                                DRM_ERROR("failed to unprepare the panel\n");
                                return;
                        }
                }
        }

        mtk_dsi_reset_engine(dsi);
        mtk_dsi_lane0_ulp_mode_enter(dsi);
        mtk_dsi_clk_ulp_mode_enter(dsi);

        mtk_dsi_disable(dsi);

        clk_disable_unprepare(dsi->engine_clk);
        clk_disable_unprepare(dsi->digital_clk);

        phy_power_off(dsi->phy);
}

static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
{
        int ret;

        if (dsi->enabled)
                return;

        ret = mtk_dsi_poweron(dsi);
        if (ret < 0) {
                DRM_ERROR("failed to power on dsi\n");
                return;
        }

        mtk_dsi_set_mode(dsi);
        mtk_dsi_clk_hs_mode(dsi, 1);

        mtk_dsi_start(dsi);

        if (dsi->panel) {
                if (drm_panel_enable(dsi->panel)) {
                        DRM_ERROR("failed to enable the panel\n");
                        goto err_dsi_power_off;
                }
        }

        dsi->enabled = true;

        return;
err_dsi_power_off:
        mtk_dsi_stop(dsi);
        mtk_dsi_poweroff(dsi);
}

static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
{
        if (!dsi->enabled)
                return;

        if (dsi->panel) {
                if (drm_panel_disable(dsi->panel)) {
                        DRM_ERROR("failed to disable the panel\n");
                        return;
                }
        }

        mtk_dsi_stop(dsi);
        mtk_dsi_poweroff(dsi);

        dsi->enabled = false;
}

static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
        .destroy = mtk_dsi_encoder_destroy,
};

static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
                                       const struct drm_display_mode *mode,
                                       struct drm_display_mode *adjusted_mode)
{
        return true;
}

static void mtk_dsi_encoder_mode_set(struct drm_encoder *encoder,
                                     struct drm_display_mode *mode,
                                     struct drm_display_mode *adjusted)
{
        struct mtk_dsi *dsi = encoder_to_dsi(encoder);

        dsi->vm.pixelclock = adjusted->clock;
        dsi->vm.hactive = adjusted->hdisplay;
        dsi->vm.hback_porch = adjusted->htotal - adjusted->hsync_end;
        dsi->vm.hfront_porch = adjusted->hsync_start - adjusted->hdisplay;
        dsi->vm.hsync_len = adjusted->hsync_end - adjusted->hsync_start;

        dsi->vm.vactive = adjusted->vdisplay;
        dsi->vm.vback_porch = adjusted->vtotal - adjusted->vsync_end;
        dsi->vm.vfront_porch = adjusted->vsync_start - adjusted->vdisplay;
        dsi->vm.vsync_len = adjusted->vsync_end - adjusted->vsync_start;
}

static void mtk_dsi_encoder_disable(struct drm_encoder *encoder)
{
        struct mtk_dsi *dsi = encoder_to_dsi(encoder);

        mtk_output_dsi_disable(dsi);
}

static void mtk_dsi_encoder_enable(struct drm_encoder *encoder)
{
        struct mtk_dsi *dsi = encoder_to_dsi(encoder);

        mtk_output_dsi_enable(dsi);
}

static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
{
        struct mtk_dsi *dsi = connector_to_dsi(connector);

        return drm_panel_get_modes(dsi->panel);
}

static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
        .mode_fixup = mtk_dsi_encoder_mode_fixup,
        .mode_set = mtk_dsi_encoder_mode_set,
        .disable = mtk_dsi_encoder_disable,
        .enable = mtk_dsi_encoder_enable,
};

static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = drm_connector_cleanup,
        .reset = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_connector_helper_funcs
        mtk_dsi_connector_helper_funcs = {
        .get_modes = mtk_dsi_connector_get_modes,
};

static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
{
        int ret;

        ret = drm_connector_init(drm, &dsi->conn, &mtk_dsi_connector_funcs,
                                 DRM_MODE_CONNECTOR_DSI);
        if (ret) {
                DRM_ERROR("Failed to connector init to drm\n");
                return ret;
        }

        drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);

        dsi->conn.dpms = DRM_MODE_DPMS_OFF;
        drm_mode_connector_attach_encoder(&dsi->conn, &dsi->encoder);

        if (dsi->panel) {
                ret = drm_panel_attach(dsi->panel, &dsi->conn);
                if (ret) {
                        DRM_ERROR("Failed to attach panel to drm\n");
                        goto err_connector_cleanup;
                }
        }

        return 0;

err_connector_cleanup:
        drm_connector_cleanup(&dsi->conn);
        return ret;
}

static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
{
        int ret;

        ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
                               DRM_MODE_ENCODER_DSI, NULL);
        if (ret) {
                DRM_ERROR("Failed to encoder init to drm\n");
                return ret;
        }
        drm_encoder_helper_add(&dsi->encoder, &mtk_dsi_encoder_helper_funcs);

        /*
         * Currently display data paths are statically assigned to a crtc each.
         * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
         */
        dsi->encoder.possible_crtcs = 1;

        /* If there's a bridge, attach to it and let it create the connector */
        ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
        if (ret) {
                DRM_ERROR("Failed to attach bridge to drm\n");

                /* Otherwise create our own connector and attach to a panel */
                ret = mtk_dsi_create_connector(drm, dsi);
                if (ret)
                        goto err_encoder_cleanup;
        }

        return 0;

err_encoder_cleanup:
        drm_encoder_cleanup(&dsi->encoder);
        return ret;
}

static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
{
        drm_encoder_cleanup(&dsi->encoder);
        /* Skip connector cleanup if creation was delegated to the bridge */
        if (dsi->conn.dev)
                drm_connector_cleanup(&dsi->conn);
}

static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
{
        struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);

        mtk_dsi_poweron(dsi);
}

static void mtk_dsi_ddp_stop(struct mtk_ddp_comp *comp)
{
        struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);

        mtk_dsi_poweroff(dsi);
}

static const struct mtk_ddp_comp_funcs mtk_dsi_funcs = {
        .start = mtk_dsi_ddp_start,
        .stop = mtk_dsi_ddp_stop,
};

static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
                               struct mipi_dsi_device *device)
{
        struct mtk_dsi *dsi = host_to_dsi(host);

        dsi->lanes = device->lanes;
        dsi->format = device->format;
        dsi->mode_flags = device->mode_flags;

        if (dsi->conn.dev)
                drm_helper_hpd_irq_event(dsi->conn.dev);

        return 0;
}

static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
                               struct mipi_dsi_device *device)
{
        struct mtk_dsi *dsi = host_to_dsi(host);

        if (dsi->conn.dev)
                drm_helper_hpd_irq_event(dsi->conn.dev);

        return 0;
}

static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
        int ret;
        u32 val;

        ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
                                 4, 2000000);
        if (ret) {
                DRM_WARN("polling dsi wait not busy timeout!\n");

                mtk_dsi_enable(dsi);
                mtk_dsi_reset_engine(dsi);
        }
}

static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
{
        switch (type) {
        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
                return 1;
        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
                return 2;
        case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
        case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
                return read_data[1] + read_data[2] * 16;
        case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
                DRM_INFO("type is 0x02, try again\n");
                break;
        default:
                DRM_INFO("type(0x%x) not recognized\n", type);
                break;
        }

        return 0;
}
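
/*
 * Fill the command queue for one transfer. Short packets (tx_len <= 2) put
 * the payload in the DATA_0/DATA_1 fields of DSI_CMDQ0; longer payloads are
 * written byte by byte starting at offset 4, with tx_len placed in the
 * header's DATA_0/DATA_1 field. For example, a one-byte DCS write such as
 * MIPI_DCS_SET_DISPLAY_ON becomes a single CMDQ entry with
 * config = SHORT_PACKET, the message type in DATA_ID and the command byte
 * in DATA_0.
 */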
static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
{
        const char *tx_buf = msg->tx_buf;
        u8 config, cmdq_size, cmdq_off, type = msg->type;
        u32 reg_val, cmdq_mask, i;

        if (MTK_DSI_HOST_IS_READ(type))
                config = BTA;
        else
                config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;

        if (msg->tx_len > 2) {
                cmdq_size = 1 + (msg->tx_len + 3) / 4;
                cmdq_off = 4;
                cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
                reg_val = (msg->tx_len << 16) | (type << 8) | config;
        } else {
                cmdq_size = 1;
                cmdq_off = 2;
                cmdq_mask = CONFIG | DATA_ID;
                reg_val = (type << 8) | config;
        }

        for (i = 0; i < msg->tx_len; i++)
                writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);

        mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
        mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}

static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
                                     const struct mipi_dsi_msg *msg, u8 flag)
{
        mtk_dsi_wait_for_idle(dsi);
        mtk_dsi_irq_data_clear(dsi, flag);
        mtk_dsi_cmdq(dsi, msg);
        mtk_dsi_start(dsi);

        if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
                return -ETIME;
        else
                return 0;
}
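
/*
 * mipi_dsi_host_ops.transfer entry point. Writes and reads issued by a panel
 * driver (for example through mipi_dsi_dcs_write() or mipi_dsi_dcs_read())
 * land here: the message is queued and started, and for reads the response
 * is copied out of DSI_RX_DATA0..3 once the LPRX-ready interrupt fires.
 * Only command-mode transfers are accepted.
 */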
static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
                                     const struct mipi_dsi_msg *msg)
{
        struct mtk_dsi *dsi = host_to_dsi(host);
        u32 recv_cnt, i;
        u8 read_data[16];
        void *src_addr;
        u8 irq_flag = CMD_DONE_INT_FLAG;

        if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) {
                DRM_ERROR("dsi engine is not command mode\n");
                return -EINVAL;
        }

        if (MTK_DSI_HOST_IS_READ(msg->type))
                irq_flag |= LPRX_RD_RDY_INT_FLAG;

        if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0)
                return -ETIME;

        if (!MTK_DSI_HOST_IS_READ(msg->type))
                return 0;

        if (!msg->rx_buf) {
                DRM_ERROR("dsi receive buffer size may be NULL\n");
                return -EINVAL;
        }

        for (i = 0; i < 16; i++)
                *(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);

        recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);

        if (recv_cnt > 2)
                src_addr = &read_data[4];
        else
                src_addr = &read_data[1];

        if (recv_cnt > 10)
                recv_cnt = 10;

        if (recv_cnt > msg->rx_len)
                recv_cnt = msg->rx_len;

        if (recv_cnt)
                memcpy(msg->rx_buf, src_addr, recv_cnt);

        DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
                 recv_cnt, *((u8 *)(msg->tx_buf)));

        return recv_cnt;
}

static const struct mipi_dsi_host_ops mtk_dsi_ops = {
        .attach = mtk_dsi_host_attach,
        .detach = mtk_dsi_host_detach,
        .transfer = mtk_dsi_host_transfer,
};
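
/*
 * Component bind: register the DDP component and the DSI host first, then
 * create the encoder and connector; failures unwind in reverse order.
 */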
static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
        int ret;
        struct drm_device *drm = data;
        struct mtk_dsi *dsi = dev_get_drvdata(dev);

        ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
        if (ret < 0) {
                dev_err(dev, "Failed to register component %pOF: %d\n",
                        dev->of_node, ret);
                return ret;
        }

        ret = mipi_dsi_host_register(&dsi->host);
        if (ret < 0) {
                dev_err(dev, "failed to register DSI host: %d\n", ret);
                goto err_ddp_comp_unregister;
        }

        ret = mtk_dsi_create_conn_enc(drm, dsi);
        if (ret) {
                DRM_ERROR("Encoder create failed with %d\n", ret);
                goto err_unregister;
        }

        return 0;

err_unregister:
        mipi_dsi_host_unregister(&dsi->host);
err_ddp_comp_unregister:
        mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
        return ret;
}

static void mtk_dsi_unbind(struct device *dev, struct device *master,
                           void *data)
{
        struct drm_device *drm = data;
        struct mtk_dsi *dsi = dev_get_drvdata(dev);

        mtk_dsi_destroy_conn_enc(dsi);
        mipi_dsi_host_unregister(&dsi->host);
        mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}

static const struct component_ops mtk_dsi_component_ops = {
        .bind = mtk_dsi_bind,
        .unbind = mtk_dsi_unbind,
};

static int mtk_dsi_probe(struct platform_device *pdev)
{
        struct mtk_dsi *dsi;
        struct device *dev = &pdev->dev;
        struct resource *regs;
        int irq_num;
        int comp_id;
        int ret;

        dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
        if (!dsi)
                return -ENOMEM;

        dsi->host.ops = &mtk_dsi_ops;
        dsi->host.dev = dev;

        ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
                                          &dsi->panel, &dsi->bridge);
        if (ret)
                return ret;

        dsi->engine_clk = devm_clk_get(dev, "engine");
        if (IS_ERR(dsi->engine_clk)) {
                ret = PTR_ERR(dsi->engine_clk);
                dev_err(dev, "Failed to get engine clock: %d\n", ret);
                return ret;
        }

        dsi->digital_clk = devm_clk_get(dev, "digital");
        if (IS_ERR(dsi->digital_clk)) {
                ret = PTR_ERR(dsi->digital_clk);
                dev_err(dev, "Failed to get digital clock: %d\n", ret);
                return ret;
        }

        dsi->hs_clk = devm_clk_get(dev, "hs");
        if (IS_ERR(dsi->hs_clk)) {
                ret = PTR_ERR(dsi->hs_clk);
                dev_err(dev, "Failed to get hs clock: %d\n", ret);
                return ret;
        }

        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dsi->regs = devm_ioremap_resource(dev, regs);
        if (IS_ERR(dsi->regs)) {
                ret = PTR_ERR(dsi->regs);
                dev_err(dev, "Failed to ioremap memory: %d\n", ret);
                return ret;
        }

        dsi->phy = devm_phy_get(dev, "dphy");
        if (IS_ERR(dsi->phy)) {
                ret = PTR_ERR(dsi->phy);
                dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
                return ret;
        }

        comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
        if (comp_id < 0) {
                dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
                return comp_id;
        }

        ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
                                &mtk_dsi_funcs);
        if (ret) {
                dev_err(dev, "Failed to initialize component: %d\n", ret);
                return ret;
        }

        irq_num = platform_get_irq(pdev, 0);
        if (irq_num < 0) {
                dev_err(&pdev->dev, "failed to request dsi irq resource\n");
                return -EPROBE_DEFER;
        }

        irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
        ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
                               IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
        if (ret) {
                dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
                return -EPROBE_DEFER;
        }

        init_waitqueue_head(&dsi->irq_wait_queue);

        platform_set_drvdata(pdev, dsi);

        return component_add(&pdev->dev, &mtk_dsi_component_ops);
}

static int mtk_dsi_remove(struct platform_device *pdev)
{
        struct mtk_dsi *dsi = platform_get_drvdata(pdev);

        mtk_output_dsi_disable(dsi);
        component_del(&pdev->dev, &mtk_dsi_component_ops);

        return 0;
}

static const struct of_device_id mtk_dsi_of_match[] = {
        { .compatible = "mediatek,mt2701-dsi" },
        { .compatible = "mediatek,mt8173-dsi" },
        { },
};

struct platform_driver mtk_dsi_driver = {
        .probe = mtk_dsi_probe,
        .remove = mtk_dsi_remove,
        .driver = {
                .name = "mtk-dsi",
                .of_match_table = mtk_dsi_of_match,
        },
};