dsi_host.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307
  1. /*
  2. * Copyright (c) 2015, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/clk.h>
  14. #include <linux/delay.h>
  15. #include <linux/err.h>
  16. #include <linux/gpio.h>
  17. #include <linux/gpio/consumer.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/of_device.h>
  20. #include <linux/of_gpio.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/pinctrl/consumer.h>
  23. #include <linux/of_graph.h>
  24. #include <linux/regulator/consumer.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/mfd/syscon.h>
  27. #include <linux/regmap.h>
  28. #include <video/mipi_display.h>
  29. #include "dsi.h"
  30. #include "dsi.xml.h"
  31. #include "sfpb.xml.h"
  32. #include "dsi_cfg.h"
/*
 * dsi_get_version() - probe the DSI controller generation from registers
 * @base: ioremapped controller register base
 * @major: out: major version (one of MSM_DSI_VER_MAJOR_*)
 * @minor: out: minor version (0 for pre-6G hosts, 6G_HW_VERSION for 6G)
 *
 * Return: 0 on success, -EINVAL on NULL out-pointers or an unrecognized
 * controller version.
 */
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
	u32 ver;

	if (!major || !minor)
		return -EINVAL;

	/*
	 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
	 * makes all other registers 4-byte shifted down.
	 *
	 * In order to identify between DSI6G(v3) and beyond, and DSIv2 and
	 * older, we read the DSI_VERSION register without any shift(offset
	 * 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
	 * the case of DSI6G, this has to be zero (the offset points to a
	 * scratch register which we never touch)
	 */
	ver = msm_readl(base + REG_DSI_VERSION);
	if (ver) {
		/* older dsi host, there is no register shift */
		ver = FIELD(ver, DSI_VERSION_MAJOR);
		if (ver <= MSM_DSI_VER_MAJOR_V2) {
			/* old versions */
			*major = ver;
			*minor = 0;
			return 0;
		} else {
			return -EINVAL;
		}
	} else {
		/*
		 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
		 * registers are shifted down, read DSI_VERSION again with
		 * the shifted offset
		 */
		ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
		ver = FIELD(ver, DSI_VERSION_MAJOR);
		if (ver == MSM_DSI_VER_MAJOR_6G) {
			/* 6G version */
			*major = ver;
			*minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
			return 0;
		} else {
			return -EINVAL;
		}
	}
}
  78. #define DSI_ERR_STATE_ACK 0x0000
  79. #define DSI_ERR_STATE_TIMEOUT 0x0001
  80. #define DSI_ERR_STATE_DLN0_PHY 0x0002
  81. #define DSI_ERR_STATE_FIFO 0x0004
  82. #define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
  83. #define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
  84. #define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
  85. #define DSI_CLK_CTRL_ENABLE_CLKS \
  86. (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
  87. DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
  88. DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
  89. DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
/* Per-controller DSI host driver state. */
struct msm_dsi_host {
	struct mipi_dsi_host base;

	struct platform_device *pdev;
	struct drm_device *dev;

	int id;		/* DSI controller index */

	void __iomem *ctrl_base;	/* mapped controller registers */
	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];

	/* bus clocks, one per cfg->bus_clk_names entry */
	struct clk *bus_clks[DSI_BUS_CLK_MAX];

	/* link clocks and their parents */
	struct clk *byte_clk;
	struct clk *esc_clk;
	struct clk *pixel_clk;
	struct clk *byte_clk_src;
	struct clk *pixel_clk_src;

	u32 byte_clk_rate;	/* Hz, computed in dsi_calc_clk_rate() */
	u32 esc_clk_rate;	/* Hz */

	/* DSI v2 specific clocks */
	struct clk *src_clk;
	struct clk *esc_clk_src;
	struct clk *dsi_clk_src;

	u32 src_clk_rate;	/* Hz, DSI v2 only */

	struct gpio_desc *disp_en_gpio;
	struct gpio_desc *te_gpio;	/* tearing-effect input */

	const struct msm_dsi_cfg_handler *cfg_hnd;	/* version-specific config */

	struct completion dma_comp;
	struct completion video_comp;
	struct mutex dev_mutex;
	struct mutex cmd_mutex;
	struct mutex clk_mutex;		/* serializes dsi_clk_ctrl() */
	spinlock_t intr_lock; /* Protect interrupt ctrl register */

	u32 err_work_state;	/* DSI_ERR_STATE_* bits consumed by err_work */
	struct work_struct err_work;
	struct workqueue_struct *workqueue;

	/* DSI 6G TX buffer*/
	struct drm_gem_object *tx_gem_obj;

	/* DSI v2 TX buffer */
	void *tx_buf;
	dma_addr_t tx_buf_paddr;

	int tx_size;

	u8 *rx_buf;

	struct regmap *sfpb;

	struct drm_display_mode *mode;

	/* connected device info */
	struct device_node *device_node;
	unsigned int channel;		/* DSI virtual channel */
	unsigned int lanes;		/* number of data lanes in use */
	enum mipi_dsi_pixel_format format;
	unsigned long mode_flags;	/* MIPI_DSI_MODE_* flags */

	/* lane data parsed via DT */
	int dlane_swap;
	int num_data_lanes;

	u32 dma_cmd_ctrl_restore;

	bool registered;
	bool power_on;
	int irq;
};
  145. static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
  146. {
  147. switch (fmt) {
  148. case MIPI_DSI_FMT_RGB565: return 16;
  149. case MIPI_DSI_FMT_RGB666_PACKED: return 18;
  150. case MIPI_DSI_FMT_RGB666:
  151. case MIPI_DSI_FMT_RGB888:
  152. default: return 24;
  153. }
  154. }
/* Read a 32-bit controller register at offset @reg from ctrl_base. */
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
	return msm_readl(msm_host->ctrl_base + reg);
}
/* Write @data to the 32-bit controller register at offset @reg. */
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
	msm_writel(data, msm_host->ctrl_base + reg);
}
  163. static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
  164. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
/*
 * dsi_get_config() - identify the controller version and get its config
 *
 * Temporarily powers the gdsc supply and the AHB interface clock so the
 * version registers can be read, then releases both again in reverse
 * order via the goto ladder below.
 *
 * Return: the matching config handler, or NULL if a resource could not
 * be acquired or the version is unrecognized.
 */
static const struct msm_dsi_cfg_handler *dsi_get_config(
						struct msm_dsi_host *msm_host)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
	struct device *dev = &msm_host->pdev->dev;
	struct regulator *gdsc_reg;
	struct clk *ahb_clk;
	int ret;
	u32 major = 0, minor = 0;

	gdsc_reg = regulator_get(dev, "gdsc");
	if (IS_ERR(gdsc_reg)) {
		pr_err("%s: cannot get gdsc\n", __func__);
		goto exit;
	}

	ahb_clk = clk_get(dev, "iface_clk");
	if (IS_ERR(ahb_clk)) {
		pr_err("%s: cannot get interface clock\n", __func__);
		goto put_gdsc;
	}

	ret = regulator_enable(gdsc_reg);
	if (ret) {
		pr_err("%s: unable to enable gdsc\n", __func__);
		goto put_clk;
	}

	ret = clk_prepare_enable(ahb_clk);
	if (ret) {
		pr_err("%s: unable to enable ahb_clk\n", __func__);
		goto disable_gdsc;
	}

	/* register layout depends on the version, see dsi_get_version() */
	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
	if (ret) {
		pr_err("%s: Invalid version\n", __func__);
		goto disable_clks;
	}

	cfg_hnd = msm_dsi_cfg_get(major, minor);

	DBG("%s: Version %x:%x\n", __func__, major, minor);

disable_clks:
	clk_disable_unprepare(ahb_clk);
disable_gdsc:
	regulator_disable(gdsc_reg);
put_clk:
	clk_put(ahb_clk);
put_gdsc:
	regulator_put(gdsc_reg);
exit:
	return cfg_hnd;
}
/* Convert the generic mipi_dsi_host to the embedding msm_dsi_host. */
static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
{
	return container_of(host, struct msm_dsi_host, base);
}
/*
 * dsi_host_regulator_disable() - disable all host supplies
 *
 * Restores each supply's disable-time load in reverse order (entries
 * with a negative disable_load are skipped), then bulk-disables all
 * supplies.
 */
static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
{
	struct regulator_bulk_data *s = msm_host->supplies;
	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
	int i;

	DBG("");
	for (i = num - 1; i >= 0; i--)
		if (regs[i].disable_load >= 0)
			regulator_set_load(s[i].consumer,
					regs[i].disable_load);

	regulator_bulk_disable(num, s);
}
  229. static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
  230. {
  231. struct regulator_bulk_data *s = msm_host->supplies;
  232. const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
  233. int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
  234. int ret, i;
  235. DBG("");
  236. for (i = 0; i < num; i++) {
  237. if (regs[i].enable_load >= 0) {
  238. ret = regulator_set_load(s[i].consumer,
  239. regs[i].enable_load);
  240. if (ret < 0) {
  241. pr_err("regulator %d set op mode failed, %d\n",
  242. i, ret);
  243. goto fail;
  244. }
  245. }
  246. }
  247. ret = regulator_bulk_enable(num, s);
  248. if (ret < 0) {
  249. pr_err("regulator enable failed, %d\n", ret);
  250. goto fail;
  251. }
  252. return 0;
  253. fail:
  254. for (i--; i >= 0; i--)
  255. regulator_set_load(s[i].consumer, regs[i].disable_load);
  256. return ret;
  257. }
  258. static int dsi_regulator_init(struct msm_dsi_host *msm_host)
  259. {
  260. struct regulator_bulk_data *s = msm_host->supplies;
  261. const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
  262. int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
  263. int i, ret;
  264. for (i = 0; i < num; i++)
  265. s[i].supply = regs[i].name;
  266. ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
  267. if (ret < 0) {
  268. pr_err("%s: failed to init regulator, ret=%d\n",
  269. __func__, ret);
  270. return ret;
  271. }
  272. return 0;
  273. }
/*
 * dsi_clk_init() - acquire all bus, link and (for v2) source clocks
 *
 * All clocks are obtained with devm_clk_get(), so no explicit release is
 * needed on failure.  clk_get_parent() lookups for the *_src clocks
 * return NULL on failure, mapped to -ENODEV here.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
	struct device *dev = &msm_host->pdev->dev;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	const struct msm_dsi_config *cfg = cfg_hnd->cfg;
	int i, ret = 0;

	/* get bus clocks */
	for (i = 0; i < cfg->num_bus_clks; i++) {
		msm_host->bus_clks[i] = devm_clk_get(dev,
						cfg->bus_clk_names[i]);
		if (IS_ERR(msm_host->bus_clks[i])) {
			ret = PTR_ERR(msm_host->bus_clks[i]);
			pr_err("%s: Unable to get %s, ret = %d\n",
				__func__, cfg->bus_clk_names[i], ret);
			goto exit;
		}
	}

	/* get link and source clocks */
	msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
	if (IS_ERR(msm_host->byte_clk)) {
		ret = PTR_ERR(msm_host->byte_clk);
		pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
			__func__, ret);
		msm_host->byte_clk = NULL;
		goto exit;
	}

	msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
	if (IS_ERR(msm_host->pixel_clk)) {
		ret = PTR_ERR(msm_host->pixel_clk);
		pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
			__func__, ret);
		msm_host->pixel_clk = NULL;
		goto exit;
	}

	msm_host->esc_clk = devm_clk_get(dev, "core_clk");
	if (IS_ERR(msm_host->esc_clk)) {
		ret = PTR_ERR(msm_host->esc_clk);
		pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
			__func__, ret);
		msm_host->esc_clk = NULL;
		goto exit;
	}

	msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
	if (!msm_host->byte_clk_src) {
		ret = -ENODEV;
		pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
		goto exit;
	}

	msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
	if (!msm_host->pixel_clk_src) {
		ret = -ENODEV;
		pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
		goto exit;
	}

	/* DSI v2 additionally needs the src clock and its parents */
	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
		msm_host->src_clk = devm_clk_get(dev, "src_clk");
		if (IS_ERR(msm_host->src_clk)) {
			ret = PTR_ERR(msm_host->src_clk);
			pr_err("%s: can't find dsi_src_clk. ret=%d\n",
				__func__, ret);
			msm_host->src_clk = NULL;
			goto exit;
		}

		msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
		if (!msm_host->esc_clk_src) {
			ret = -ENODEV;
			pr_err("%s: can't get esc_clk_src. ret=%d\n",
				__func__, ret);
			goto exit;
		}

		msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
		if (!msm_host->dsi_clk_src) {
			ret = -ENODEV;
			pr_err("%s: can't get dsi_clk_src. ret=%d\n",
				__func__, ret);
		}
	}
exit:
	return ret;
}
  354. static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
  355. {
  356. const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
  357. int i, ret;
  358. DBG("id=%d", msm_host->id);
  359. for (i = 0; i < cfg->num_bus_clks; i++) {
  360. ret = clk_prepare_enable(msm_host->bus_clks[i]);
  361. if (ret) {
  362. pr_err("%s: failed to enable bus clock %d ret %d\n",
  363. __func__, i, ret);
  364. goto err;
  365. }
  366. }
  367. return 0;
  368. err:
  369. for (; i > 0; i--)
  370. clk_disable_unprepare(msm_host->bus_clks[i]);
  371. return ret;
  372. }
  373. static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
  374. {
  375. const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
  376. int i;
  377. DBG("");
  378. for (i = cfg->num_bus_clks - 1; i >= 0; i--)
  379. clk_disable_unprepare(msm_host->bus_clks[i]);
  380. }
/*
 * dsi_link_clk_enable_6g() - set rates and enable link clocks (DSI 6G)
 *
 * Sets byte/pixel clock rates first, then enables esc, byte and pixel
 * clocks in that order.  The goto ladder unwinds already-enabled clocks
 * in reverse order on failure.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	/* mode->clock is in kHz; clk API wants Hz */
	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

pixel_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
error:
	return ret;
}
/*
 * dsi_link_clk_enable_v2() - set rates and enable link clocks (DSI v2)
 *
 * Sets byte, esc, src and pixel clock rates, then enables byte, esc,
 * src and pixel clocks in that order.  The goto ladder unwinds
 * already-enabled clocks in reverse order on failure.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate,
		msm_host->esc_clk_rate, msm_host->src_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
		goto error;
	}

	/* mode->clock is in kHz; clk API wants Hz */
	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto esc_clk_err;
	}

	ret = clk_prepare_enable(msm_host->src_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi src clk\n", __func__);
		goto src_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

pixel_clk_err:
	clk_disable_unprepare(msm_host->src_clk);
src_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
esc_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
error:
	return ret;
}
  475. static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
  476. {
  477. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  478. if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
  479. return dsi_link_clk_enable_6g(msm_host);
  480. else
  481. return dsi_link_clk_enable_v2(msm_host);
  482. }
/*
 * dsi_link_clk_disable() - disable all link clocks
 *
 * The set of clocks and disable order differ per generation, mirroring
 * the reverse of the corresponding enable path (6G: esc/pixel/byte;
 * v2: pixel/src/esc/byte).
 */
static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		clk_disable_unprepare(msm_host->esc_clk);
		clk_disable_unprepare(msm_host->pixel_clk);
		clk_disable_unprepare(msm_host->byte_clk);
	} else {
		clk_disable_unprepare(msm_host->pixel_clk);
		clk_disable_unprepare(msm_host->src_clk);
		clk_disable_unprepare(msm_host->esc_clk);
		clk_disable_unprepare(msm_host->byte_clk);
	}
}
/*
 * dsi_clk_ctrl() - enable or disable bus + link clocks as a unit
 * @enable: true to enable (bus first, then link), false to disable
 *          (link first, then bus)
 *
 * Serialized by clk_mutex.  On link-clock enable failure the bus
 * clocks are rolled back so the state stays balanced.
 *
 * Return: 0 on success, negative error code on enable failure.
 */
static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
{
	int ret = 0;

	mutex_lock(&msm_host->clk_mutex);
	if (enable) {
		ret = dsi_bus_clk_enable(msm_host);
		if (ret) {
			pr_err("%s: Can not enable bus clk, %d\n",
				__func__, ret);
			goto unlock_ret;
		}
		ret = dsi_link_clk_enable(msm_host);
		if (ret) {
			pr_err("%s: Can not enable link clk, %d\n",
				__func__, ret);
			dsi_bus_clk_disable(msm_host);
			goto unlock_ret;
		}
	} else {
		dsi_link_clk_disable(msm_host);
		dsi_bus_clk_disable(msm_host);
	}
unlock_ret:
	mutex_unlock(&msm_host->clk_mutex);
	return ret;
}
/*
 * dsi_calc_clk_rate() - derive byte/esc (and v2 src) clock rates from
 * the current display mode, lane count and pixel format.
 *
 * byte_clk = pclk * bpp / (8 * lanes); for DSI v2 the esc clock is
 * searched as byte_clk divided by a 4-bit divider so that it lands in
 * the 5-20 MHz range.
 *
 * NOTE(review): pclk_rate * bpp is computed in u32; for very high pixel
 * clocks (pclk * bpp >= 2^32) this could overflow — confirm against the
 * supported mode range.
 *
 * Return: 0 on success, -EINVAL if no mode is set or no valid esc
 * divider exists (v2 only).
 */
static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	u8 lanes = msm_host->lanes;
	u32 bpp = dsi_get_bpp(msm_host->format);
	u32 pclk_rate;

	if (!mode) {
		pr_err("%s: mode not set\n", __func__);
		return -EINVAL;
	}

	/* mode->clock is in kHz */
	pclk_rate = mode->clock * 1000;
	if (lanes > 0) {
		msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
	} else {
		pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
		msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
	}

	DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);

	msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
		unsigned int esc_mhz, esc_div;
		unsigned long byte_mhz;

		msm_host->src_clk_rate = (pclk_rate * bpp) / 8;

		/*
		 * esc clock is byte clock followed by a 4 bit divider,
		 * we need to find an escape clock frequency within the
		 * mipi DSI spec range within the maximum divider limit
		 * We iterate here between an escape clock frequencey
		 * between 20 Mhz to 5 Mhz and pick up the first one
		 * that can be supported by our divider
		 */
		byte_mhz = msm_host->byte_clk_rate / 1000000;

		for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
			esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);

			/*
			 * TODO: Ideally, we shouldn't know what sort of divider
			 * is available in mmss_cc, we're just assuming that
			 * it'll always be a 4 bit divider. Need to come up with
			 * a better way here.
			 */
			if (esc_div >= 1 && esc_div <= 16)
				break;
		}

		/* loop fell through: no divider fit the 5-20 MHz window */
		if (esc_mhz < 5)
			return -EINVAL;

		msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;

		DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
			msm_host->src_clk_rate);
	}

	return 0;
}
/*
 * dsi_phy_sw_reset() - pulse the PHY software reset bit
 *
 * Asserts DSI_PHY_RESET_RESET, waits for the reset to take (wmb() to
 * order the write before the delay), then deasserts it.
 */
static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
{
	DBG("");
	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
	/* Make sure fully reset */
	wmb();
	udelay(1000);
	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
	udelay(100);
}
/*
 * dsi_intr_ctrl() - set or clear bits in the interrupt control register
 * @mask: interrupt bits to modify
 * @enable: non-zero to set the bits, zero to clear them
 *
 * Read-modify-write of REG_DSI_INTR_CTRL, protected by intr_lock since
 * this can race with the interrupt handler.
 */
static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
{
	u32 intr;
	unsigned long flags;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);

	if (enable)
		intr |= mask;
	else
		intr &= ~mask;

	DBG("intr=%x enable=%d", intr, enable);

	dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
}
  599. static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
  600. {
  601. if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
  602. return BURST_MODE;
  603. else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
  604. return NON_BURST_SYNCH_PULSE;
  605. return NON_BURST_SYNCH_EVENT;
  606. }
  607. static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
  608. const enum mipi_dsi_pixel_format mipi_fmt)
  609. {
  610. switch (mipi_fmt) {
  611. case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
  612. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
  613. case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
  614. case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
  615. default: return VID_DST_FORMAT_RGB888;
  616. }
  617. }
  618. static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
  619. const enum mipi_dsi_pixel_format mipi_fmt)
  620. {
  621. switch (mipi_fmt) {
  622. case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
  623. case MIPI_DSI_FMT_RGB666_PACKED:
  624. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
  625. case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
  626. default: return CMD_DST_FORMAT_RGB888;
  627. }
  628. }
/*
 * Program the DSI controller for the negotiated operating mode.
 *
 * When @enable is false the controller is simply turned off (DSI_CTRL = 0).
 * Otherwise video- or command-mode registers are configured from the mode
 * flags cached at attach time, followed by DMA, trigger, clockout-timing,
 * EOT, error-mask, clock and lane setup; DSI_CTRL_ENABLE is written last
 * so the controller only starts with a complete configuration.
 *
 * @clk_pre/@clk_post: T_CLK_PRE/T_CLK_POST values written to
 * DSI_CLKOUT_TIMING_CTRL for HS clock-lane entry/exit timing.
 */
static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
			u32 clk_pre, u32 clk_post)
{
	u32 flags = msm_host->mode_flags;
	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	u32 data = 0;

	if (!enable) {
		dsi_write(msm_host, REG_DSI_CTRL, 0);
		return;
	}

	if (flags & MIPI_DSI_MODE_VIDEO) {
		if (flags & MIPI_DSI_MODE_VIDEO_HSE)
			data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
		if (flags & MIPI_DSI_MODE_VIDEO_HFP)
			data |= DSI_VID_CFG0_HFP_POWER_STOP;
		if (flags & MIPI_DSI_MODE_VIDEO_HBP)
			data |= DSI_VID_CFG0_HBP_POWER_STOP;
		if (flags & MIPI_DSI_MODE_VIDEO_HSA)
			data |= DSI_VID_CFG0_HSA_POWER_STOP;
		/* Always set low power stop mode for BLLP
		 * to let command engine send packets
		 */
		data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
			DSI_VID_CFG0_BLLP_POWER_STOP;
		data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
		data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
		data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
		dsi_write(msm_host, REG_DSI_VID_CFG0, data);

		/* Do not swap RGB colors */
		data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
		/* NOTE(review): 0 is written here rather than 'data'; this is
		 * only equivalent if SWAP_RGB encodes to 0 — confirm against
		 * the register definitions before "fixing".
		 */
		dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
	} else {
		/* Do not swap RGB colors */
		data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
		data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
		dsi_write(msm_host, REG_DSI_CMD_CFG0, data);

		/* DCS commands the MDP stream is framed with */
		data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
			DSI_CMD_CFG1_WR_MEM_CONTINUE(
					MIPI_DCS_WRITE_MEMORY_CONTINUE);
		/* Always insert DCS command */
		data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
		dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
	}

	/* command DMA fetches from the frame buffer, sent in low power */
	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
			DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
			DSI_CMD_DMA_CTRL_LOW_POWER);

	data = 0;
	/* Always assume dedicated TE pin */
	data |= DSI_TRIG_CTRL_TE;

	data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
	data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
	data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
	/* DSI6G >= v1.2 can block DMA commands within an active frame */
	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
		(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);

	data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
		DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
	dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);

	/* append EOT packets unless the peripheral asked us not to */
	data = 0;
	if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
		data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
	dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);

	/* allow only ack-err-status to generate interrupt */
	dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);

	data = DSI_CTRL_CLK_EN;

	DBG("lane number=%d", msm_host->lanes);
	/* enable LANE0..LANE(n-1) bits for the configured lane count */
	data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);

	dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
		  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));

	/* continuous clock: force the clock lane to stay in HS */
	if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
		dsi_write(msm_host, REG_DSI_LANE_CTRL,
			DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);

	data |= DSI_CTRL_ENABLE;

	dsi_write(msm_host, REG_DSI_CTRL, data);
}
/*
 * Program the timing registers from the current DRM display mode.
 *
 * Positions are counted from sync start (taken as 0), so the active
 * region wraps around: e.g. ha_start = htotal - hsync_start.  Video mode
 * programs H/V active, total and sync-position registers; command mode
 * instead programs the MDP stream word count (one pixel line plus the
 * 1-byte write_memory_start command) and the stream totals.
 */
static void dsi_timing_setup(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
	u32 h_total = mode->htotal;
	u32 v_total = mode->vtotal;
	u32 hs_end = mode->hsync_end - mode->hsync_start;
	u32 vs_end = mode->vsync_end - mode->vsync_start;
	u32 ha_start = h_total - mode->hsync_start;
	u32 ha_end = ha_start + mode->hdisplay;
	u32 va_start = v_total - mode->vsync_start;
	u32 va_end = va_start + mode->vdisplay;
	u32 wc;

	DBG("");

	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
		dsi_write(msm_host, REG_DSI_ACTIVE_H,
			DSI_ACTIVE_H_START(ha_start) |
			DSI_ACTIVE_H_END(ha_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_V,
			DSI_ACTIVE_V_START(va_start) |
			DSI_ACTIVE_V_END(va_end));
		/* totals are programmed as N-1 */
		dsi_write(msm_host, REG_DSI_TOTAL,
			DSI_TOTAL_H_TOTAL(h_total - 1) |
			DSI_TOTAL_V_TOTAL(v_total - 1));

		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
			DSI_ACTIVE_HSYNC_START(hs_start) |
			DSI_ACTIVE_HSYNC_END(hs_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
	} else {		/* command mode */
		/* image data and 1 byte write_memory_start cmd */
		wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
					msm_host->channel) |
			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
					MIPI_DSI_DCS_LONG_WRITE));

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
	}
}
/*
 * Soft-reset the DSI controller core.  The controller clocks must be
 * enabled for the reset to take effect, hence the CLK_CTRL write first
 * and the write barriers ordering the reset pulse.
 */
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* clocks need to be enabled before reset */

	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb(); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
}
  761. static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
  762. bool video_mode, bool enable)
  763. {
  764. u32 dsi_ctrl;
  765. dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
  766. if (!enable) {
  767. dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
  768. DSI_CTRL_CMD_MODE_EN);
  769. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
  770. DSI_IRQ_MASK_VIDEO_DONE, 0);
  771. } else {
  772. if (video_mode) {
  773. dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
  774. } else { /* command mode */
  775. dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
  776. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
  777. }
  778. dsi_ctrl |= DSI_CTRL_ENABLE;
  779. }
  780. dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
  781. }
  782. static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
  783. {
  784. u32 data;
  785. data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
  786. if (mode == 0)
  787. data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
  788. else
  789. data |= DSI_CMD_DMA_CTRL_LOW_POWER;
  790. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
  791. }
/*
 * Wait up to 70ms for the next video-frame-done interrupt.  The
 * VIDEO_DONE irq is unmasked only for the duration of the wait, and the
 * wait_for_completion_timeout() result is deliberately ignored: this is
 * best-effort pacing and the caller proceeds either way.
 */
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);

	reinit_completion(&msm_host->video_comp);

	wait_for_completion_timeout(&msm_host->video_comp,
			msecs_to_jiffies(70));

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
  800. static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
  801. {
  802. if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
  803. return;
  804. if (msm_host->power_on) {
  805. dsi_wait4video_done(msm_host);
  806. /* delay 4 ms to skip BLLP */
  807. usleep_range(2000, 4000);
  808. }
  809. }
  810. /* dsi_cmd */
/*
 * Allocate the command-DMA TX buffer.
 *
 * DSI6G hosts use an uncached GEM object (its iova is handed to the DMA
 * engine and must be 8-byte aligned); older hosts use a coherent DMA
 * buffer.  On success msm_host->tx_size holds the usable size.
 *
 * Returns 0 on success or a negative errno.
 */
static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
	struct drm_device *dev = msm_host->dev;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int ret;
	u32 iova;

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		/* struct_mutex covers GEM allocation and iova pinning */
		mutex_lock(&dev->struct_mutex);
		msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
		if (IS_ERR(msm_host->tx_gem_obj)) {
			ret = PTR_ERR(msm_host->tx_gem_obj);
			pr_err("%s: failed to allocate gem, %d\n",
				__func__, ret);
			msm_host->tx_gem_obj = NULL;
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}

		ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			pr_err("%s: failed to get iova, %d\n", __func__, ret);
			return ret;
		}

		/* the DMA engine needs an 8-byte-aligned base address */
		if (iova & 0x07) {
			pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
			return -EINVAL;
		}

		msm_host->tx_size = msm_host->tx_gem_obj->size;
	} else {
		msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
					&msm_host->tx_buf_paddr, GFP_KERNEL);
		if (!msm_host->tx_buf) {
			ret = -ENOMEM;
			pr_err("%s: failed to allocate tx buf, %d\n",
				__func__, ret);
			return ret;
		}

		msm_host->tx_size = size;
	}

	return 0;
}
  852. static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
  853. {
  854. struct drm_device *dev = msm_host->dev;
  855. if (msm_host->tx_gem_obj) {
  856. msm_gem_put_iova(msm_host->tx_gem_obj, 0);
  857. mutex_lock(&dev->struct_mutex);
  858. msm_gem_free_object(msm_host->tx_gem_obj);
  859. msm_host->tx_gem_obj = NULL;
  860. mutex_unlock(&dev->struct_mutex);
  861. }
  862. if (msm_host->tx_buf)
  863. dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
  864. msm_host->tx_buf_paddr);
  865. }
/*
 * prepare cmd buffer to be txed
 *
 * Encodes @msg as a mipi_dsi_packet and lays it out in the TX buffer in
 * the MSM-specific format: a reordered 3-byte DSI header, one flags byte
 * (bit7 = last packet, bit6 = long packet, bit5 = response expected),
 * then the payload, padded with 0xff to a 4-byte boundary.
 *
 * Returns the padded length in bytes, or a negative errno.
 */
static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
			const struct mipi_dsi_msg *msg)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	struct mipi_dsi_packet packet;
	int len;
	int ret;
	u8 *data;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret) {
		pr_err("%s: create packet failed, %d\n", __func__, ret);
		return ret;
	}
	/* round up to the next 4-byte boundary */
	len = (packet.size + 3) & (~0x3);

	if (len > msm_host->tx_size) {
		pr_err("%s: packet size is too big\n", __func__);
		return -EINVAL;
	}

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			pr_err("%s: get vaddr failed, %d\n", __func__, ret);
			return ret;
		}
	} else {
		data = msm_host->tx_buf;
	}

	/* MSM specific command format in memory */
	data[0] = packet.header[1];
	data[1] = packet.header[2];
	data[2] = packet.header[0];
	data[3] = BIT(7); /* Last packet */
	if (mipi_dsi_packet_format_is_long(msg->type))
		data[3] |= BIT(6);
	if (msg->rx_buf && msg->rx_len)
		data[3] |= BIT(5);

	/* Long packet */
	if (packet.payload && packet.payload_length)
		memcpy(data + 4, packet.payload, packet.payload_length);

	/* Append 0xff to the end */
	if (packet.size < len)
		memset(data + packet.size, 0xff, len - packet.size);

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
		msm_gem_put_vaddr(msm_host->tx_gem_obj);

	return len;
}
  916. /*
  917. * dsi_short_read1_resp: 1 parameter
  918. */
  919. static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  920. {
  921. u8 *data = msg->rx_buf;
  922. if (data && (msg->rx_len >= 1)) {
  923. *data = buf[1]; /* strip out dcs type */
  924. return 1;
  925. } else {
  926. pr_err("%s: read data does not match with rx_buf len %zu\n",
  927. __func__, msg->rx_len);
  928. return -EINVAL;
  929. }
  930. }
  931. /*
  932. * dsi_short_read2_resp: 2 parameter
  933. */
  934. static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  935. {
  936. u8 *data = msg->rx_buf;
  937. if (data && (msg->rx_len >= 2)) {
  938. data[0] = buf[1]; /* strip out dcs type */
  939. data[1] = buf[2];
  940. return 2;
  941. } else {
  942. pr_err("%s: read data does not match with rx_buf len %zu\n",
  943. __func__, msg->rx_len);
  944. return -EINVAL;
  945. }
  946. }
  947. static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  948. {
  949. /* strip out 4 byte dcs header */
  950. if (msg->rx_buf && msg->rx_len)
  951. memcpy(msg->rx_buf, buf + 4, msg->rx_len);
  952. return msg->rx_len;
  953. }
/*
 * Kick one command DMA transfer of @len bytes and wait for completion.
 *
 * The DMA base is the GEM iova on DSI6G, or the coherent buffer physical
 * address on older hosts.  In video mode the transfer is first deferred
 * past frame-done so it lands in the BLLP (dsi_wait4video_eng_busy).
 *
 * NOTE(review): msm_dsi_manager_cmd_xfer_trigger() reports whether this
 * host actually fired the trigger; when it did not (presumably the
 * manager triggers through the other host in dual-DSI setups — confirm),
 * no completion is awaited and @len is returned as-is.
 *
 * Returns @len on success, -ETIMEDOUT if the DMA-done irq never arrives,
 * or a negative errno from iova lookup.
 */
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int ret;
	u32 dma_base;
	bool triggered;

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &dma_base);
		if (ret) {
			pr_err("%s: failed to get iova: %d\n", __func__, ret);
			return ret;
		}
	} else {
		dma_base = msm_host->tx_buf_paddr;
	}

	reinit_completion(&msm_host->dma_comp);

	dsi_wait4video_eng_busy(msm_host);

	triggered = msm_dsi_manager_cmd_xfer_trigger(
						msm_host->id, dma_base, len);
	if (triggered) {
		ret = wait_for_completion_timeout(&msm_host->dma_comp,
					msecs_to_jiffies(200));
		DBG("ret=%d", ret);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else
			ret = len;
	} else
		ret = len;

	return ret;
}
/*
 * Copy up to @rx_byte bytes of panel response from the RDBK_DATA FIFO
 * registers into @buf.
 *
 * Only 4 x 32-bit RDBK registers exist (16 bytes per pass).  For longer
 * responses the caller issues repeated reads with growing @pkt_size, and
 * @buf advances through msm_host->rx_buf; the repeated leading bytes of
 * each later pass are skipped (see block comment below).  Registers are
 * read highest-index first and byte-swapped to host order.
 *
 * Returns the number of bytes stored in @buf.
 */
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
			u8 *buf, int rx_byte, int pkt_size)
{
	u32 *lp, *temp, data;
	int i, j = 0, cnt;
	u32 read_cnt;
	u8 reg[16];	/* staging area: one full set of RDBK registers */
	int repeated_bytes = 0;
	int buf_offset = buf - msm_host->rx_buf;

	lp = (u32 *)buf;
	temp = (u32 *)reg;
	/* number of 32-bit registers holding rx_byte bytes, capped at 4 */
	cnt = (rx_byte + 3) >> 2;
	if (cnt > 4)
		cnt = 4; /* 4 x 32 bits registers only */

	/* short response: 4 bytes; long: payload + 6 bytes of protocol */
	if (rx_byte == 4)
		read_cnt = 4;
	else
		read_cnt = pkt_size + 6;

	/*
	 * In case of multiple reads from the panel, after the first read, there
	 * is possibility that there are some bytes in the payload repeating in
	 * the RDBK_DATA registers. Since we read all the parameters from the
	 * panel right from the first byte for every pass. We need to skip the
	 * repeating bytes and then append the new parameters to the rx buffer.
	 */
	if (read_cnt > 16) {
		int bytes_shifted;
		/* Any data more than 16 bytes will be shifted out.
		 * The temp read buffer should already contain these bytes.
		 * The remaining bytes in read buffer are the repeated bytes.
		 */
		bytes_shifted = read_cnt - 16;
		repeated_bytes = buf_offset - bytes_shifted;
	}

	for (i = cnt - 1; i >= 0; i--) {
		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
		*temp++ = ntohl(data); /* to host byte order */
		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
	}

	/* copy only the fresh (non-repeated) bytes into the caller's buf */
	for (i = repeated_bytes; i < 16; i++)
		buf[j++] = reg[i];

	return j;
}
  1028. static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
  1029. const struct mipi_dsi_msg *msg)
  1030. {
  1031. int len, ret;
  1032. int bllp_len = msm_host->mode->hdisplay *
  1033. dsi_get_bpp(msm_host->format) / 8;
  1034. len = dsi_cmd_dma_add(msm_host, msg);
  1035. if (!len) {
  1036. pr_err("%s: failed to add cmd type = 0x%x\n",
  1037. __func__, msg->type);
  1038. return -EINVAL;
  1039. }
  1040. /* for video mode, do not send cmds more than
  1041. * one pixel line, since it only transmit it
  1042. * during BLLP.
  1043. */
  1044. /* TODO: if the command is sent in LP mode, the bit rate is only
  1045. * half of esc clk rate. In this case, if the video is already
  1046. * actively streaming, we need to check more carefully if the
  1047. * command can be fit into one BLLP.
  1048. */
  1049. if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
  1050. pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
  1051. __func__, len);
  1052. return -EINVAL;
  1053. }
  1054. ret = dsi_cmd_dma_tx(msm_host, len);
  1055. if (ret < len) {
  1056. pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
  1057. __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
  1058. return -ECOMM;
  1059. }
  1060. return len;
  1061. }
/*
 * Fully reset the controller and then restore the previous DSI_CTRL
 * value.  Used by the error worker after an MDP FIFO underflow.  The
 * wmb()s enforce the required ordering: disable -> clocks on -> reset
 * pulse -> re-enable.
 */
static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
{
	u32 data0, data1;

	data0 = dsi_read(msm_host, REG_DSI_CTRL);
	data1 = data0;
	data1 &= ~DSI_CTRL_ENABLE;
	dsi_write(msm_host, REG_DSI_CTRL, data1);
	/*
	 * dsi controller need to be disabled before
	 * clocks turned on
	 */
	wmb();

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb();	/* make sure clocks enabled */

	/* dsi controller can only be reset while clocks are running */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb();	/* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb();	/* controller out of reset */
	dsi_write(msm_host, REG_DSI_CTRL, data0);
	wmb();	/* make sure dsi controller enabled again */
}
  1084. static void dsi_err_worker(struct work_struct *work)
  1085. {
  1086. struct msm_dsi_host *msm_host =
  1087. container_of(work, struct msm_dsi_host, err_work);
  1088. u32 status = msm_host->err_work_state;
  1089. pr_err_ratelimited("%s: status=%x\n", __func__, status);
  1090. if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
  1091. dsi_sw_reset_restore(msm_host);
  1092. /* It is safe to clear here because error irq is disabled. */
  1093. msm_host->err_work_state = 0;
  1094. /* enable dsi error interrupt */
  1095. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
  1096. }
  1097. static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
  1098. {
  1099. u32 status;
  1100. status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
  1101. if (status) {
  1102. dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
  1103. /* Writing of an extra 0 needed to clear error bits */
  1104. dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
  1105. msm_host->err_work_state |= DSI_ERR_STATE_ACK;
  1106. }
  1107. }
  1108. static void dsi_timeout_status(struct msm_dsi_host *msm_host)
  1109. {
  1110. u32 status;
  1111. status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
  1112. if (status) {
  1113. dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
  1114. msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
  1115. }
  1116. }
  1117. static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
  1118. {
  1119. u32 status;
  1120. status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
  1121. if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
  1122. DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
  1123. DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
  1124. DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
  1125. DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
  1126. dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
  1127. msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
  1128. }
  1129. }
  1130. static void dsi_fifo_status(struct msm_dsi_host *msm_host)
  1131. {
  1132. u32 status;
  1133. status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
  1134. /* fifo underflow, overflow */
  1135. if (status) {
  1136. dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
  1137. msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
  1138. if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
  1139. msm_host->err_work_state |=
  1140. DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
  1141. }
  1142. }
  1143. static void dsi_status(struct msm_dsi_host *msm_host)
  1144. {
  1145. u32 status;
  1146. status = dsi_read(msm_host, REG_DSI_STATUS0);
  1147. if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
  1148. dsi_write(msm_host, REG_DSI_STATUS0, status);
  1149. msm_host->err_work_state |=
  1150. DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
  1151. }
  1152. }
  1153. static void dsi_clk_status(struct msm_dsi_host *msm_host)
  1154. {
  1155. u32 status;
  1156. status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
  1157. if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
  1158. dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
  1159. msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
  1160. }
  1161. }
/*
 * Error-interrupt handler (called from irq context): mask the error irq,
 * gather every error status register into err_work_state, and hand off
 * to the error worker, which handles recovery and re-enables the irq.
 */
static void dsi_error(struct msm_dsi_host *msm_host)
{
	/* disable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);

	dsi_clk_status(msm_host);
	dsi_fifo_status(msm_host);
	dsi_ack_err_status(msm_host);
	dsi_timeout_status(msm_host);
	dsi_status(msm_host);
	dsi_dln0_phy_err(msm_host);

	queue_work(msm_host->workqueue, &msm_host->err_work);
}
  1174. static irqreturn_t dsi_host_irq(int irq, void *ptr)
  1175. {
  1176. struct msm_dsi_host *msm_host = ptr;
  1177. u32 isr;
  1178. unsigned long flags;
  1179. if (!msm_host->ctrl_base)
  1180. return IRQ_HANDLED;
  1181. spin_lock_irqsave(&msm_host->intr_lock, flags);
  1182. isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
  1183. dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
  1184. spin_unlock_irqrestore(&msm_host->intr_lock, flags);
  1185. DBG("isr=0x%x, id=%d", isr, msm_host->id);
  1186. if (isr & DSI_IRQ_ERROR)
  1187. dsi_error(msm_host);
  1188. if (isr & DSI_IRQ_VIDEO_DONE)
  1189. complete(&msm_host->video_comp);
  1190. if (isr & DSI_IRQ_CMD_DMA_DONE)
  1191. complete(&msm_host->dma_comp);
  1192. return IRQ_HANDLED;
  1193. }
  1194. static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
  1195. struct device *panel_device)
  1196. {
  1197. msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
  1198. "disp-enable",
  1199. GPIOD_OUT_LOW);
  1200. if (IS_ERR(msm_host->disp_en_gpio)) {
  1201. DBG("cannot get disp-enable-gpios %ld",
  1202. PTR_ERR(msm_host->disp_en_gpio));
  1203. return PTR_ERR(msm_host->disp_en_gpio);
  1204. }
  1205. msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
  1206. GPIOD_IN);
  1207. if (IS_ERR(msm_host->te_gpio)) {
  1208. DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
  1209. return PTR_ERR(msm_host->te_gpio);
  1210. }
  1211. return 0;
  1212. }
  1213. static int dsi_host_attach(struct mipi_dsi_host *host,
  1214. struct mipi_dsi_device *dsi)
  1215. {
  1216. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1217. int ret;
  1218. if (dsi->lanes > msm_host->num_data_lanes)
  1219. return -EINVAL;
  1220. msm_host->channel = dsi->channel;
  1221. msm_host->lanes = dsi->lanes;
  1222. msm_host->format = dsi->format;
  1223. msm_host->mode_flags = dsi->mode_flags;
  1224. /* Some gpios defined in panel DT need to be controlled by host */
  1225. ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
  1226. if (ret)
  1227. return ret;
  1228. DBG("id=%d", msm_host->id);
  1229. if (msm_host->dev)
  1230. drm_helper_hpd_irq_event(msm_host->dev);
  1231. return 0;
  1232. }
  1233. static int dsi_host_detach(struct mipi_dsi_host *host,
  1234. struct mipi_dsi_device *dsi)
  1235. {
  1236. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1237. msm_host->device_node = NULL;
  1238. DBG("id=%d", msm_host->id);
  1239. if (msm_host->dev)
  1240. drm_helper_hpd_irq_event(msm_host->dev);
  1241. return 0;
  1242. }
  1243. static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
  1244. const struct mipi_dsi_msg *msg)
  1245. {
  1246. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1247. int ret;
  1248. if (!msg || !msm_host->power_on)
  1249. return -EINVAL;
  1250. mutex_lock(&msm_host->cmd_mutex);
  1251. ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
  1252. mutex_unlock(&msm_host->cmd_mutex);
  1253. return ret;
  1254. }
  1255. static struct mipi_dsi_host_ops dsi_host_ops = {
  1256. .attach = dsi_host_attach,
  1257. .detach = dsi_host_detach,
  1258. .transfer = dsi_host_transfer,
  1259. };
/*
 * List of supported physical to logical lane mappings.
 * For example, the 2nd entry represents the following mapping:
 *
 * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
 *
 * The row index chosen by dsi_host_parse_lane_data() is stored in
 * msm_host->dlane_swap and programmed into DSI_LANE_SWAP_CTRL.
 */
static const int supported_data_lane_swaps[][4] = {
	{ 0, 1, 2, 3 },
	{ 3, 0, 1, 2 },
	{ 2, 3, 0, 1 },
	{ 1, 2, 3, 0 },
	{ 0, 3, 2, 1 },
	{ 1, 0, 3, 2 },
	{ 2, 1, 0, 3 },
	{ 3, 2, 1, 0 },
};
  1276. static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
  1277. struct device_node *ep)
  1278. {
  1279. struct device *dev = &msm_host->pdev->dev;
  1280. struct property *prop;
  1281. u32 lane_map[4];
  1282. int ret, i, len, num_lanes;
  1283. prop = of_find_property(ep, "data-lanes", &len);
  1284. if (!prop) {
  1285. dev_dbg(dev, "failed to find data lane mapping\n");
  1286. return -EINVAL;
  1287. }
  1288. num_lanes = len / sizeof(u32);
  1289. if (num_lanes < 1 || num_lanes > 4) {
  1290. dev_err(dev, "bad number of data lanes\n");
  1291. return -EINVAL;
  1292. }
  1293. msm_host->num_data_lanes = num_lanes;
  1294. ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
  1295. num_lanes);
  1296. if (ret) {
  1297. dev_err(dev, "failed to read lane data\n");
  1298. return ret;
  1299. }
  1300. /*
  1301. * compare DT specified physical-logical lane mappings with the ones
  1302. * supported by hardware
  1303. */
  1304. for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
  1305. const int *swap = supported_data_lane_swaps[i];
  1306. int j;
  1307. /*
  1308. * the data-lanes array we get from DT has a logical->physical
  1309. * mapping. The "data lane swap" register field represents
  1310. * supported configurations in a physical->logical mapping.
  1311. * Translate the DT mapping to what we understand and find a
  1312. * configuration that works.
  1313. */
  1314. for (j = 0; j < num_lanes; j++) {
  1315. if (lane_map[j] < 0 || lane_map[j] > 3)
  1316. dev_err(dev, "bad physical lane entry %u\n",
  1317. lane_map[j]);
  1318. if (swap[lane_map[j]] != j)
  1319. break;
  1320. }
  1321. if (j == num_lanes) {
  1322. msm_host->dlane_swap = i;
  1323. return 0;
  1324. }
  1325. }
  1326. return -EINVAL;
  1327. }
/*
 * Parse the host's DT: output endpoint (port 1), its data-lane mapping,
 * the remote device (panel/bridge) node, and the optional "syscon-sfpb"
 * regmap.  A missing endpoint is not an error (nothing connected to the
 * DSI output); returns 0 in that case, a negative errno otherwise.
 */
static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
	struct device *dev = &msm_host->pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *endpoint, *device_node;
	int ret;

	/*
	 * Get the endpoint of the output port of the DSI host. In our case,
	 * this is mapped to port number with reg = 1. Don't return an error if
	 * the remote endpoint isn't defined. It's possible that there is
	 * nothing connected to the dsi output.
	 */
	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
	if (!endpoint) {
		dev_dbg(dev, "%s: no endpoint\n", __func__);
		return 0;
	}

	ret = dsi_host_parse_lane_data(msm_host, endpoint);
	if (ret) {
		dev_err(dev, "%s: invalid lane configuration %d\n",
			__func__, ret);
		goto err;
	}

	/* Get panel node from the output port's endpoint data */
	device_node = of_graph_get_remote_port_parent(endpoint);
	if (!device_node) {
		dev_err(dev, "%s: no valid device\n", __func__);
		ret = -ENODEV;
		goto err;
	}

	msm_host->device_node = device_node;

	if (of_property_read_bool(np, "syscon-sfpb")) {
		msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
					"syscon-sfpb");
		if (IS_ERR(msm_host->sfpb)) {
			dev_err(dev, "%s: failed to get sfpb regmap\n",
				__func__);
			/* NOTE(review): on this failure the node refs below
			 * are dropped while msm_host->device_node still
			 * points at the put node — confirm callers treat
			 * this error as fatal for the host.
			 */
			ret = PTR_ERR(msm_host->sfpb);
		}
	}

	of_node_put(device_node);

err:
	of_node_put(endpoint);

	return ret;
}
  1373. static int dsi_host_get_id(struct msm_dsi_host *msm_host)
  1374. {
  1375. struct platform_device *pdev = msm_host->pdev;
  1376. const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
  1377. struct resource *res;
  1378. int i;
  1379. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
  1380. if (!res)
  1381. return -EINVAL;
  1382. for (i = 0; i < cfg->num_dsi; i++) {
  1383. if (cfg->io_start[i] == res->start)
  1384. return i;
  1385. }
  1386. return -EINVAL;
  1387. }
  1388. int msm_dsi_host_init(struct msm_dsi *msm_dsi)
  1389. {
  1390. struct msm_dsi_host *msm_host = NULL;
  1391. struct platform_device *pdev = msm_dsi->pdev;
  1392. int ret;
  1393. msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
  1394. if (!msm_host) {
  1395. pr_err("%s: FAILED: cannot alloc dsi host\n",
  1396. __func__);
  1397. ret = -ENOMEM;
  1398. goto fail;
  1399. }
  1400. msm_host->pdev = pdev;
  1401. ret = dsi_host_parse_dt(msm_host);
  1402. if (ret) {
  1403. pr_err("%s: failed to parse dt\n", __func__);
  1404. goto fail;
  1405. }
  1406. msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
  1407. if (IS_ERR(msm_host->ctrl_base)) {
  1408. pr_err("%s: unable to map Dsi ctrl base\n", __func__);
  1409. ret = PTR_ERR(msm_host->ctrl_base);
  1410. goto fail;
  1411. }
  1412. msm_host->cfg_hnd = dsi_get_config(msm_host);
  1413. if (!msm_host->cfg_hnd) {
  1414. ret = -EINVAL;
  1415. pr_err("%s: get config failed\n", __func__);
  1416. goto fail;
  1417. }
  1418. msm_host->id = dsi_host_get_id(msm_host);
  1419. if (msm_host->id < 0) {
  1420. ret = msm_host->id;
  1421. pr_err("%s: unable to identify DSI host index\n", __func__);
  1422. goto fail;
  1423. }
  1424. /* fixup base address by io offset */
  1425. msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
  1426. ret = dsi_regulator_init(msm_host);
  1427. if (ret) {
  1428. pr_err("%s: regulator init failed\n", __func__);
  1429. goto fail;
  1430. }
  1431. ret = dsi_clk_init(msm_host);
  1432. if (ret) {
  1433. pr_err("%s: unable to initialize dsi clks\n", __func__);
  1434. goto fail;
  1435. }
  1436. msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
  1437. if (!msm_host->rx_buf) {
  1438. pr_err("%s: alloc rx temp buf failed\n", __func__);
  1439. goto fail;
  1440. }
  1441. init_completion(&msm_host->dma_comp);
  1442. init_completion(&msm_host->video_comp);
  1443. mutex_init(&msm_host->dev_mutex);
  1444. mutex_init(&msm_host->cmd_mutex);
  1445. mutex_init(&msm_host->clk_mutex);
  1446. spin_lock_init(&msm_host->intr_lock);
  1447. /* setup workqueue */
  1448. msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
  1449. INIT_WORK(&msm_host->err_work, dsi_err_worker);
  1450. msm_dsi->host = &msm_host->base;
  1451. msm_dsi->id = msm_host->id;
  1452. DBG("Dsi Host %d initialized", msm_host->id);
  1453. return 0;
  1454. fail:
  1455. return ret;
  1456. }
  1457. void msm_dsi_host_destroy(struct mipi_dsi_host *host)
  1458. {
  1459. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1460. DBG("");
  1461. dsi_tx_buf_free(msm_host);
  1462. if (msm_host->workqueue) {
  1463. flush_workqueue(msm_host->workqueue);
  1464. destroy_workqueue(msm_host->workqueue);
  1465. msm_host->workqueue = NULL;
  1466. }
  1467. mutex_destroy(&msm_host->clk_mutex);
  1468. mutex_destroy(&msm_host->cmd_mutex);
  1469. mutex_destroy(&msm_host->dev_mutex);
  1470. }
  1471. int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
  1472. struct drm_device *dev)
  1473. {
  1474. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1475. struct platform_device *pdev = msm_host->pdev;
  1476. int ret;
  1477. msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
  1478. if (msm_host->irq < 0) {
  1479. ret = msm_host->irq;
  1480. dev_err(dev->dev, "failed to get irq: %d\n", ret);
  1481. return ret;
  1482. }
  1483. ret = devm_request_irq(&pdev->dev, msm_host->irq,
  1484. dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
  1485. "dsi_isr", msm_host);
  1486. if (ret < 0) {
  1487. dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
  1488. msm_host->irq, ret);
  1489. return ret;
  1490. }
  1491. msm_host->dev = dev;
  1492. ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
  1493. if (ret) {
  1494. pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
  1495. return ret;
  1496. }
  1497. return 0;
  1498. }
/*
 * Register with the MIPI DSI core (idempotent).  With @check_defer, the
 * probe is deferred until the panel or bridge referenced by device_node
 * has been probed, so that connector detection finds a connected device.
 * Returns 0, -EPROBE_DEFER, or the mipi_dsi_host_register() error.
 */
int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	/* Register mipi dsi host */
	if (!msm_host->registered) {
		host->dev = &msm_host->pdev->dev;
		host->ops = &dsi_host_ops;
		ret = mipi_dsi_host_register(host);
		if (ret)
			return ret;

		msm_host->registered = true;

		/* If the panel driver has not been probed after host register,
		 * we should defer the host's probe.
		 * It makes sure panel is connected when fbcon detects
		 * connector status and gets the proper display mode to
		 * create framebuffer.
		 * Don't try to defer if there is nothing connected to the dsi
		 * output
		 */
		if (check_defer && msm_host->device_node) {
			if (!of_drm_find_panel(msm_host->device_node))
				if (!of_drm_find_bridge(msm_host->device_node))
					return -EPROBE_DEFER;
		}
	}

	return 0;
}
  1527. void msm_dsi_host_unregister(struct mipi_dsi_host *host)
  1528. {
  1529. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1530. if (msm_host->registered) {
  1531. mipi_dsi_host_unregister(host);
  1532. host->dev = NULL;
  1533. host->ops = NULL;
  1534. msm_host->registered = false;
  1535. }
  1536. }
/*
 * Prepare the controller for a command DMA transfer of @msg.
 *
 * Turns the DSI clocks on, switches the lanes to HS mode unless the
 * message requests low-power mode, saves the current DSI_CTRL value
 * (restored later by msm_dsi_host_xfer_restore()) and unmasks the
 * CMD_DMA_DONE interrupt. Always returns 0.
 */
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
	const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	/* TODO: make sure dsi_cmd_mdp is idle.
	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
	 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
	 * How to handle the old versions? Wait for mdp cmd done?
	 */

	/*
	 * mdss interrupt is generated in mdp core clock domain
	 * mdp clock need to be enabled to receive dsi interrupt
	 */
	dsi_clk_ctrl(msm_host, 1);

	/* TODO: vote for bus bandwidth */

	/* Transfer in HS mode unless the caller explicitly asked for LPM */
	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(0, msm_host);

	/* Save DSI_CTRL so the restore path can put it back unchanged,
	 * then force command mode + controller enable for the transfer.
	 */
	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
	dsi_write(msm_host, REG_DSI_CTRL,
		msm_host->dma_cmd_ctrl_restore |
		DSI_CTRL_CMD_MODE_EN |
		DSI_CTRL_ENABLE);
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);

	return 0;
}
/*
 * Undo msm_dsi_host_xfer_prepare() after the command DMA transfer of
 * @msg completed: mask CMD_DMA_DONE, restore the saved DSI_CTRL value,
 * return the lanes to LP mode if the transfer used HS, and drop the
 * clock vote taken by the prepare path.
 */
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
	const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);

	/* prepare switched to HS in this case; switch back to LP */
	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(1, msm_host);

	/* TODO: unvote for bus bandwidth */

	dsi_clk_ctrl(msm_host, 0);
}
  1573. int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
  1574. const struct mipi_dsi_msg *msg)
  1575. {
  1576. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1577. return dsi_cmds2buf_tx(msm_host, msg);
  1578. }
/*
 * Execute a DSI read command described by @msg and decode the response
 * into msg->rx_buf.
 *
 * The controller read-back FIFO holds only 16 bytes, so long reads are
 * chunked: each loop iteration sends a SET_MAXIMUM_RETURN_PACKET_SIZE
 * command, re-sends the read command, and drains one FIFO's worth of
 * data (10 payload bytes on the first pass, 14 afterwards — the DCS
 * header is only present in the first chunk). Short reads (rlen <= 2)
 * complete in a single pass.
 *
 * Returns the number of payload bytes stored (via the dsi_*_resp()
 * helpers), 0 on empty read or unrecognized response, or a negative
 * error code.
 */
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
	const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int data_byte, rx_byte, dlen, end;
	int short_response, diff, pkt_size, ret = 0;
	char cmd;
	int rlen = msg->rx_len;
	u8 *buf;

	if (rlen <= 2) {
		short_response = 1;
		pkt_size = rlen;
		rx_byte = 4;	/* short response packet: 4 bytes total */
	} else {
		short_response = 0;
		data_byte = 10;	/* first read */
		if (rlen < data_byte)
			pkt_size = rlen;
		else
			pkt_size = data_byte;
		rx_byte = data_byte + 6; /* 4 header + 2 crc */
	}

	buf = msm_host->rx_buf;
	end = 0;
	while (!end) {
		/* max return size is sent little-endian in a 2-byte payload */
		u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
		struct mipi_dsi_msg max_pkt_size_msg = {
			.channel = msg->channel,
			.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
			.tx_len = 2,
			.tx_buf = tx,
		};

		DBG("rlen=%d pkt_size=%d rx_byte=%d",
			rlen, pkt_size, rx_byte);

		ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
		if (ret < 2) {
			pr_err("%s: Set max pkt size failed, %d\n",
				__func__, ret);
			return -EINVAL;
		}

		if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
			(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
			/* Clear the RDBK_DATA registers */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
					DSI_RDBK_DATA_CTRL_CLR);
			wmb(); /* make sure the RDBK registers are cleared */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
			wmb(); /* release cleared status before transfer */
		}

		/* re-send the read command for this chunk */
		ret = dsi_cmds2buf_tx(msm_host, msg);
		if (ret < msg->tx_len) {
			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
			return ret;
		}

		/*
		 * once cmd_dma_done interrupt received,
		 * return data from client is ready and stored
		 * at RDBK_DATA register already
		 * since rx fifo is 16 bytes, dcs header is kept at first loop,
		 * after that dcs header lost during shift into registers
		 */
		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);

		if (dlen <= 0)
			return 0;

		if (short_response)
			break;

		/* last chunk when the remaining length fits in one FIFO */
		if (rlen <= data_byte) {
			diff = data_byte - rlen;
			end = 1;
		} else {
			diff = 0;
			rlen -= data_byte;
		}

		if (!end) {
			dlen -= 2; /* 2 crc */
			dlen -= diff;
			buf += dlen;	/* next start position */
			data_byte = 14;	/* NOT first read */
			if (rlen < data_byte)
				pkt_size += rlen;
			else
				pkt_size += data_byte;
			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
		}
	}

	/*
	 * For single Long read, if the requested rlen < 10,
	 * we need to shift the start position of rx
	 * data buffer to skip the bytes which are not
	 * updated.
	 */
	if (pkt_size < 10 && !short_response)
		buf = msm_host->rx_buf + (10 - rlen);
	else
		buf = msm_host->rx_buf;

	/* first byte of the response is the data type identifier */
	cmd = buf[0];
	switch (cmd) {
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
		ret = 0;
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		ret = dsi_short_read1_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		ret = dsi_short_read2_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		ret = dsi_long_read_resp(buf, msg);
		break;
	default:
		pr_warn("%s:Invalid response cmd\n", __func__);
		ret = 0;
	}

	return ret;
}
  1699. void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
  1700. u32 len)
  1701. {
  1702. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1703. dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
  1704. dsi_write(msm_host, REG_DSI_DMA_LEN, len);
  1705. dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
  1706. /* Make sure trigger happens */
  1707. wmb();
  1708. }
  1709. int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
  1710. struct msm_dsi_pll *src_pll)
  1711. {
  1712. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1713. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  1714. struct clk *byte_clk_provider, *pixel_clk_provider;
  1715. int ret;
  1716. ret = msm_dsi_pll_get_clk_provider(src_pll,
  1717. &byte_clk_provider, &pixel_clk_provider);
  1718. if (ret) {
  1719. pr_info("%s: can't get provider from pll, don't set parent\n",
  1720. __func__);
  1721. return 0;
  1722. }
  1723. ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
  1724. if (ret) {
  1725. pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
  1726. __func__, ret);
  1727. goto exit;
  1728. }
  1729. ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
  1730. if (ret) {
  1731. pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
  1732. __func__, ret);
  1733. goto exit;
  1734. }
  1735. if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
  1736. ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
  1737. if (ret) {
  1738. pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
  1739. __func__, ret);
  1740. goto exit;
  1741. }
  1742. ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
  1743. if (ret) {
  1744. pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
  1745. __func__, ret);
  1746. goto exit;
  1747. }
  1748. }
  1749. exit:
  1750. return ret;
  1751. }
  1752. int msm_dsi_host_enable(struct mipi_dsi_host *host)
  1753. {
  1754. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1755. dsi_op_mode_config(msm_host,
  1756. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
  1757. /* TODO: clock should be turned off for command mode,
  1758. * and only turned on before MDP START.
  1759. * This part of code should be enabled once mdp driver support it.
  1760. */
  1761. /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
  1762. dsi_clk_ctrl(msm_host, 0); */
  1763. return 0;
  1764. }
  1765. int msm_dsi_host_disable(struct mipi_dsi_host *host)
  1766. {
  1767. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1768. dsi_op_mode_config(msm_host,
  1769. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
  1770. /* Since we have disabled INTF, the video engine won't stop so that
  1771. * the cmd engine will be blocked.
  1772. * Reset to disable video engine so that we can send off cmd.
  1773. */
  1774. dsi_sw_reset(msm_host);
  1775. return 0;
  1776. }
  1777. static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
  1778. {
  1779. enum sfpb_ahb_arb_master_port_en en;
  1780. if (!msm_host->sfpb)
  1781. return;
  1782. en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
  1783. regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
  1784. SFPB_GPREG_MASTER_PORT_EN__MASK,
  1785. SFPB_GPREG_MASTER_PORT_EN(en));
  1786. }
/*
 * Power up the DSI host: regulators, PHY, clocks, pinctrl, timing and
 * controller configuration, and the optional display-enable GPIO.
 *
 * Idempotent — returns 0 immediately if already powered on. On failure
 * the goto-cleanup chain unwinds whatever was brought up. Serialized
 * against msm_dsi_host_power_off() by dev_mutex.
 *
 * Returns 0 on success or a negative error code.
 */
int msm_dsi_host_power_on(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	u32 clk_pre = 0, clk_post = 0;
	int ret = 0;

	mutex_lock(&msm_host->dev_mutex);
	if (msm_host->power_on) {
		DBG("dsi host already on");
		goto unlock_ret;
	}

	msm_dsi_sfpb_config(msm_host, true);

	ret = dsi_calc_clk_rate(msm_host);
	if (ret) {
		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
		goto unlock_ret;
	}

	ret = dsi_host_regulator_enable(msm_host);
	if (ret) {
		pr_err("%s:Failed to enable vregs.ret=%d\n",
			__func__, ret);
		goto unlock_ret;
	}

	/* Bus clocks are only needed while programming the PHY here;
	 * they are dropped again right after msm_dsi_manager_phy_enable().
	 */
	ret = dsi_bus_clk_enable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
		goto fail_disable_reg;
	}

	dsi_phy_sw_reset(msm_host);
	ret = msm_dsi_manager_phy_enable(msm_host->id,
					msm_host->byte_clk_rate * 8,
					msm_host->esc_clk_rate,
					&clk_pre, &clk_post);
	dsi_bus_clk_disable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable phy, %d\n", __func__, ret);
		goto fail_disable_reg;
	}

	/* Full link + bus clock enable for normal operation */
	ret = dsi_clk_ctrl(msm_host, 1);
	if (ret) {
		pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
		goto fail_disable_reg;
	}

	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
	if (ret) {
		pr_err("%s: failed to set pinctrl default state, %d\n",
			__func__, ret);
		goto fail_disable_clk;
	}

	/* Program timing, reset, and enable the controller with the
	 * clk_pre/clk_post values reported by the PHY.
	 */
	dsi_timing_setup(msm_host);
	dsi_sw_reset(msm_host);
	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);

	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 1);

	msm_host->power_on = true;
	mutex_unlock(&msm_host->dev_mutex);

	return 0;

fail_disable_clk:
	dsi_clk_ctrl(msm_host, 0);
fail_disable_reg:
	dsi_host_regulator_disable(msm_host);
unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return ret;
}
/*
 * Power down the DSI host, reversing msm_dsi_host_power_on():
 * controller disable, display GPIO, pinctrl sleep state, PHY, clocks,
 * regulators, then the SFPB port.
 *
 * Idempotent — returns immediately if already off. Serialized against
 * msm_dsi_host_power_on() by dev_mutex. Always returns 0.
 */
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	mutex_lock(&msm_host->dev_mutex);
	if (!msm_host->power_on) {
		DBG("dsi host already off");
		goto unlock_ret;
	}

	dsi_ctrl_config(msm_host, false, 0, 0);

	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 0);

	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);

	msm_dsi_manager_phy_disable(msm_host->id);

	dsi_clk_ctrl(msm_host, 0);

	dsi_host_regulator_disable(msm_host);

	msm_dsi_sfpb_config(msm_host, false);

	DBG("-");

	msm_host->power_on = false;

unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return 0;
}
  1873. int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
  1874. struct drm_display_mode *mode)
  1875. {
  1876. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1877. if (msm_host->mode) {
  1878. drm_mode_destroy(msm_host->dev, msm_host->mode);
  1879. msm_host->mode = NULL;
  1880. }
  1881. msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
  1882. if (!msm_host->mode) {
  1883. pr_err("%s: cannot duplicate mode\n", __func__);
  1884. return -ENOMEM;
  1885. }
  1886. return 0;
  1887. }
  1888. struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
  1889. unsigned long *panel_flags)
  1890. {
  1891. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1892. struct drm_panel *panel;
  1893. panel = of_drm_find_panel(msm_host->device_node);
  1894. if (panel_flags)
  1895. *panel_flags = msm_host->mode_flags;
  1896. return panel;
  1897. }
  1898. struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
  1899. {
  1900. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1901. return of_drm_find_bridge(msm_host->device_node);
  1902. }