dsi_host.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082
  1. /*
  2. * Copyright (c) 2015, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/math64.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <video/mipi_display.h>

#include "dsi.h"
#include "dsi.xml.h"
/* DSI controller major versions: V2 is the legacy (pre-6G) core,
 * 6G is the DSI6G family (8x74 and newer SoCs).
 */
#define MSM_DSI_VER_MAJOR_V2	0x02
#define MSM_DSI_VER_MAJOR_6G	0x03
/* DSI6G minor versions, as read from the 6G_HW_VERSION register */
#define MSM_DSI_6G_VER_MINOR_V1_0	0x10000000
#define MSM_DSI_6G_VER_MINOR_V1_1	0x10010000
#define MSM_DSI_6G_VER_MINOR_V1_1_1	0x10010001
#define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
#define MSM_DSI_6G_VER_MINOR_V1_3	0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001

/* On DSI6G the 6G_HW_VERSION register at offset 0 pushes every other
 * register down by 4 bytes (see dsi_get_version()).
 */
#define DSI_6G_REG_SHIFT	4
/* Per-SoC DSI controller configuration, selected by matching the
 * (major, minor) version read from the hardware against dsi_cfgs[].
 */
struct dsi_config {
	u32 major;			/* MSM_DSI_VER_MAJOR_* */
	u32 minor;			/* MSM_DSI_6G_VER_MINOR_* (0 for V2) */
	u32 io_offset;			/* byte offset added by dsi_read()/dsi_write() */
	struct dsi_reg_config reg_cfg;	/* regulator set required by this SoC */
};
/* Supported controller versions and their regulator requirements.
 * NOTE(review): columns of each regulator entry appear to be
 * {name, min_uV, max_uV, enable_load_uA, disable_load_uA} based on
 * how dsi_regulator_init()/dsi_host_regulator_enable() index the
 * fields; struct dsi_reg_entry is declared in dsi.h -- confirm there.
 * A value of -1 means "do not program this parameter".
 */
static const struct dsi_config dsi_cfgs[] = {
	{MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },	/* legacy V2: no regulator table */
	{ /* 8974 v1 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_0,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8974 v2 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_1,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8974 v3 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8084 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_2,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8916 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 2850000, 2850000, 100000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8x94 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_3,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 7,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdda", 1250000, 1250000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
				{"vcca", 1000000, 1000000, 10000, 100},
				{"vdd", 1800000, 1800000, 100000, 100},
				{"lab_reg", -1, -1, -1, -1},
				{"ibb_reg", -1, -1, -1, -1},
			},
		}
	},
};
  134. static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
  135. {
  136. u32 ver;
  137. u32 ver_6g;
  138. if (!major || !minor)
  139. return -EINVAL;
  140. /* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
  141. * makes all other registers 4-byte shifted down.
  142. */
  143. ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
  144. if (ver_6g == 0) {
  145. ver = msm_readl(base + REG_DSI_VERSION);
  146. ver = FIELD(ver, DSI_VERSION_MAJOR);
  147. if (ver <= MSM_DSI_VER_MAJOR_V2) {
  148. /* old versions */
  149. *major = ver;
  150. *minor = 0;
  151. return 0;
  152. } else {
  153. return -EINVAL;
  154. }
  155. } else {
  156. ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
  157. ver = FIELD(ver, DSI_VERSION_MAJOR);
  158. if (ver == MSM_DSI_VER_MAJOR_6G) {
  159. /* 6G version */
  160. *major = ver;
  161. *minor = ver_6g;
  162. return 0;
  163. } else {
  164. return -EINVAL;
  165. }
  166. }
  167. }
/* Error bits accumulated in msm_host->err_work_state for the error
 * worker. NOTE(review): DSI_ERR_STATE_ACK is 0x0000 and so cannot be
 * tested as a bit flag -- verify its use in the IRQ/error-work code
 * further down the file.
 */
#define DSI_ERR_STATE_ACK			0x0000
#define DSI_ERR_STATE_TIMEOUT			0x0001
#define DSI_ERR_STATE_DLN0_PHY			0x0002
#define DSI_ERR_STATE_FIFO			0x0004
#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW	0x0008
#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION	0x0010
#define DSI_ERR_STATE_PLL_UNLOCKED		0x0020

/* All clock-enable bits written to REG_DSI_CLK_CTRL in one go */
#define DSI_CLK_CTRL_ENABLE_CLKS	\
	(DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
	DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
	DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
	DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
/* Driver state for one DSI host controller instance. */
struct msm_dsi_host {
	struct mipi_dsi_host base;	/* embedded DRM MIPI-DSI host */
	struct platform_device *pdev;
	struct drm_device *dev;
	int id;				/* controller index */

	void __iomem *ctrl_base;	/* mapped registers (before io_offset) */
	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];

	/* bus (interface) clocks, handled by dsi_bus_clk_enable/disable */
	struct clk *mdp_core_clk;
	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *mmss_misc_ahb_clk;
	/* link clocks, handled by dsi_link_clk_enable/disable */
	struct clk *byte_clk;
	struct clk *esc_clk;
	struct clk *pixel_clk;
	struct clk *byte_clk_src;
	struct clk *pixel_clk_src;

	u32 byte_clk_rate;		/* Hz, computed by dsi_calc_clk_rate() */

	struct gpio_desc *disp_en_gpio;
	struct gpio_desc *te_gpio;

	const struct dsi_config *cfg;	/* matched entry from dsi_cfgs[] */

	struct completion dma_comp;
	struct completion video_comp;
	struct mutex dev_mutex;
	struct mutex cmd_mutex;
	struct mutex clk_mutex;		/* serializes dsi_clk_ctrl() */
	spinlock_t intr_lock; /* Protect interrupt ctrl register */

	u32 err_work_state;		/* DSI_ERR_STATE_* bits for err_work */
	struct work_struct err_work;
	struct workqueue_struct *workqueue;

	struct drm_gem_object *tx_gem_obj;	/* DMA command TX buffer */
	u8 *rx_buf;				/* command RX buffer */

	struct drm_display_mode *mode;

	/* connected device info */
	struct device_node *device_node;
	unsigned int channel;		/* DSI virtual channel */
	unsigned int lanes;
	enum mipi_dsi_pixel_format format;
	unsigned long mode_flags;	/* MIPI_DSI_MODE_* flags */

	u32 dma_cmd_ctrl_restore;	/* saved REG_DSI_CMD_DMA_CTRL value */

	bool registered;
	bool power_on;
	int irq;
};
  223. static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
  224. {
  225. switch (fmt) {
  226. case MIPI_DSI_FMT_RGB565: return 16;
  227. case MIPI_DSI_FMT_RGB666_PACKED: return 18;
  228. case MIPI_DSI_FMT_RGB666:
  229. case MIPI_DSI_FMT_RGB888:
  230. default: return 24;
  231. }
  232. }
/* Read a controller register, applying the per-version io_offset
 * (4 bytes on DSI6G, 0 on legacy V2).
 */
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
	return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
}
/* Write a controller register, applying the per-version io_offset. */
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
	msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
}
  241. static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
  242. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
/* Probe the controller version and return the matching dsi_cfgs[]
 * entry, or NULL on any failure.
 *
 * The gdsc power domain and AHB clock are enabled only for the
 * duration of the version-register read and are released (with the
 * regulator reference dropped) on every path before returning.
 */
static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
{
	const struct dsi_config *cfg;
	struct regulator *gdsc_reg;
	int i, ret;
	u32 major = 0, minor = 0;

	gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
	if (IS_ERR(gdsc_reg)) {
		pr_err("%s: cannot get gdsc\n", __func__);
		goto fail;
	}
	ret = regulator_enable(gdsc_reg);
	if (ret) {
		pr_err("%s: unable to enable gdsc\n", __func__);
		regulator_put(gdsc_reg);
		goto fail;
	}
	ret = clk_prepare_enable(msm_host->ahb_clk);
	if (ret) {
		pr_err("%s: unable to enable ahb_clk\n", __func__);
		regulator_disable(gdsc_reg);
		regulator_put(gdsc_reg);
		goto fail;
	}

	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);

	/* version read done; drop the temporary clock/power references */
	clk_disable_unprepare(msm_host->ahb_clk);
	regulator_disable(gdsc_reg);
	regulator_put(gdsc_reg);

	if (ret) {
		pr_err("%s: Invalid version\n", __func__);
		goto fail;
	}

	for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
		cfg = dsi_cfgs + i;
		if ((cfg->major == major) && (cfg->minor == minor))
			return cfg;
	}
	pr_err("%s: Version %x:%x not support\n", __func__, major, minor);

fail:
	return NULL;
}
/* Recover the driver state from the embedded mipi_dsi_host. */
static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
{
	return container_of(host, struct msm_dsi_host, base);
}
  288. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
  289. {
  290. struct regulator_bulk_data *s = msm_host->supplies;
  291. const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
  292. int num = msm_host->cfg->reg_cfg.num;
  293. int i;
  294. DBG("");
  295. for (i = num - 1; i >= 0; i--)
  296. if (regs[i].disable_load >= 0)
  297. regulator_set_load(s[i].consumer,
  298. regs[i].disable_load);
  299. regulator_bulk_disable(num, s);
  300. }
/* Power up all host regulators: program each supply's enable load
 * (where specified), then bulk-enable the whole set.
 *
 * Returns 0 on success or a negative errno; on failure the load
 * requests already made are rolled back to the disable values.
 * NOTE(review): the unwind loop applies disable_load unconditionally,
 * without the ">= 0" check used elsewhere -- for entries like "gdsc"
 * (loads of -1) this passes -1 to regulator_set_load(); confirm this
 * is benign.
 */
static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
{
	struct regulator_bulk_data *s = msm_host->supplies;
	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
	int num = msm_host->cfg->reg_cfg.num;
	int ret, i;

	DBG("");
	for (i = 0; i < num; i++) {
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
						regs[i].enable_load);
			if (ret < 0) {
				pr_err("regulator %d set op mode failed, %d\n",
					i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		pr_err("regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	/* roll back the load requests made so far (i is the failed index) */
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);

	return ret;
}
/* Acquire all regulators named in the SoC config (devm-managed) and
 * program the requested voltage range on those that specify one
 * (min/max >= 0). Returns 0 on success or a negative errno.
 */
static int dsi_regulator_init(struct msm_dsi_host *msm_host)
{
	struct regulator_bulk_data *s = msm_host->supplies;
	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
	int num = msm_host->cfg->reg_cfg.num;
	int i, ret;

	for (i = 0; i < num; i++)
		s[i].supply = regs[i].name;

	ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
	if (ret < 0) {
		pr_err("%s: failed to init regulator, ret=%d\n",
			__func__, ret);
		return ret;
	}

	for (i = 0; i < num; i++) {
		if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
			ret = regulator_set_voltage(s[i].consumer,
				regs[i].min_voltage, regs[i].max_voltage);
			if (ret < 0) {
				pr_err("regulator %d set voltage failed, %d\n",
					i, ret);
				return ret;
			}
		}
	}

	return 0;
}
/* Look up all bus and link clocks (devm-managed). Returns 0 on
 * success or the first lookup error.
 *
 * NOTE(review): for the link clocks the handle is reset to NULL on
 * failure while the bus-clock handles are left holding the ERR_PTR --
 * presumably so later enable/disable paths can tolerate the missing
 * link clocks; confirm against the callers.
 */
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
	struct device *dev = &msm_host->pdev->dev;
	int ret = 0;

	/* bus clocks */
	msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
	if (IS_ERR(msm_host->mdp_core_clk)) {
		ret = PTR_ERR(msm_host->mdp_core_clk);
		pr_err("%s: Unable to get mdp core clk. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
	if (IS_ERR(msm_host->ahb_clk)) {
		ret = PTR_ERR(msm_host->ahb_clk);
		pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
	if (IS_ERR(msm_host->axi_clk)) {
		ret = PTR_ERR(msm_host->axi_clk);
		pr_err("%s: Unable to get axi bus clk. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
	if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
		ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
		pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	/* link clocks */
	msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
	if (IS_ERR(msm_host->byte_clk)) {
		ret = PTR_ERR(msm_host->byte_clk);
		pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
			__func__, ret);
		msm_host->byte_clk = NULL;
		goto exit;
	}

	msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
	if (IS_ERR(msm_host->pixel_clk)) {
		ret = PTR_ERR(msm_host->pixel_clk);
		pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
			__func__, ret);
		msm_host->pixel_clk = NULL;
		goto exit;
	}

	msm_host->esc_clk = devm_clk_get(dev, "core_clk");
	if (IS_ERR(msm_host->esc_clk)) {
		ret = PTR_ERR(msm_host->esc_clk);
		pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
			__func__, ret);
		msm_host->esc_clk = NULL;
		goto exit;
	}

	msm_host->byte_clk_src = devm_clk_get(dev, "byte_clk_src");
	if (IS_ERR(msm_host->byte_clk_src)) {
		ret = PTR_ERR(msm_host->byte_clk_src);
		pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
		msm_host->byte_clk_src = NULL;
		goto exit;
	}

	msm_host->pixel_clk_src = devm_clk_get(dev, "pixel_clk_src");
	if (IS_ERR(msm_host->pixel_clk_src)) {
		ret = PTR_ERR(msm_host->pixel_clk_src);
		pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
		msm_host->pixel_clk_src = NULL;
		goto exit;
	}

exit:
	return ret;
}
  430. static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
  431. {
  432. int ret;
  433. DBG("id=%d", msm_host->id);
  434. ret = clk_prepare_enable(msm_host->mdp_core_clk);
  435. if (ret) {
  436. pr_err("%s: failed to enable mdp_core_clock, %d\n",
  437. __func__, ret);
  438. goto core_clk_err;
  439. }
  440. ret = clk_prepare_enable(msm_host->ahb_clk);
  441. if (ret) {
  442. pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
  443. goto ahb_clk_err;
  444. }
  445. ret = clk_prepare_enable(msm_host->axi_clk);
  446. if (ret) {
  447. pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
  448. goto axi_clk_err;
  449. }
  450. ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
  451. if (ret) {
  452. pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
  453. __func__, ret);
  454. goto misc_ahb_clk_err;
  455. }
  456. return 0;
  457. misc_ahb_clk_err:
  458. clk_disable_unprepare(msm_host->axi_clk);
  459. axi_clk_err:
  460. clk_disable_unprepare(msm_host->ahb_clk);
  461. ahb_clk_err:
  462. clk_disable_unprepare(msm_host->mdp_core_clk);
  463. core_clk_err:
  464. return ret;
  465. }
/* Disable the bus clocks in reverse of the enable order. */
static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
{
	DBG("");
	clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
	clk_disable_unprepare(msm_host->axi_clk);
	clk_disable_unprepare(msm_host->ahb_clk);
	clk_disable_unprepare(msm_host->mdp_core_clk);
}
/* Program byte/pixel clock rates (byte_clk_rate must already be set
 * by dsi_calc_clk_rate(), and msm_host->mode must be valid), then
 * enable the escape, byte and pixel link clocks. On failure, clocks
 * enabled so far are unwound via the goto ladder.
 */
static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	/* mode->clock is in kHz; the clock framework wants Hz */
	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

pixel_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
error:
	return ret;
}
/* Disable the three link clocks.
 * NOTE(review): order is esc, pixel, byte -- not the exact reverse of
 * the enable order (esc, byte, pixel); presumably intentional, confirm.
 */
static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
{
	clk_disable_unprepare(msm_host->esc_clk);
	clk_disable_unprepare(msm_host->pixel_clk);
	clk_disable_unprepare(msm_host->byte_clk);
}
  518. static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
  519. {
  520. int ret = 0;
  521. mutex_lock(&msm_host->clk_mutex);
  522. if (enable) {
  523. ret = dsi_bus_clk_enable(msm_host);
  524. if (ret) {
  525. pr_err("%s: Can not enable bus clk, %d\n",
  526. __func__, ret);
  527. goto unlock_ret;
  528. }
  529. ret = dsi_link_clk_enable(msm_host);
  530. if (ret) {
  531. pr_err("%s: Can not enable link clk, %d\n",
  532. __func__, ret);
  533. dsi_bus_clk_disable(msm_host);
  534. goto unlock_ret;
  535. }
  536. } else {
  537. dsi_link_clk_disable(msm_host);
  538. dsi_bus_clk_disable(msm_host);
  539. }
  540. unlock_ret:
  541. mutex_unlock(&msm_host->clk_mutex);
  542. return ret;
  543. }
  544. static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
  545. {
  546. struct drm_display_mode *mode = msm_host->mode;
  547. u8 lanes = msm_host->lanes;
  548. u32 bpp = dsi_get_bpp(msm_host->format);
  549. u32 pclk_rate;
  550. if (!mode) {
  551. pr_err("%s: mode not set\n", __func__);
  552. return -EINVAL;
  553. }
  554. pclk_rate = mode->clock * 1000;
  555. if (lanes > 0) {
  556. msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
  557. } else {
  558. pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
  559. msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
  560. }
  561. DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
  562. return 0;
  563. }
/* Pulse the PHY software reset: assert, wait 1ms, deassert, settle. */
static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
{
	DBG("");
	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
	/* Make sure fully reset */
	wmb();
	udelay(1000);
	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
	udelay(100);
}
  574. static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
  575. {
  576. u32 intr;
  577. unsigned long flags;
  578. spin_lock_irqsave(&msm_host->intr_lock, flags);
  579. intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
  580. if (enable)
  581. intr |= mask;
  582. else
  583. intr &= ~mask;
  584. DBG("intr=%x enable=%d", intr, enable);
  585. dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
  586. spin_unlock_irqrestore(&msm_host->intr_lock, flags);
  587. }
  588. static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
  589. {
  590. if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
  591. return BURST_MODE;
  592. else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
  593. return NON_BURST_SYNCH_PULSE;
  594. return NON_BURST_SYNCH_EVENT;
  595. }
  596. static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
  597. const enum mipi_dsi_pixel_format mipi_fmt)
  598. {
  599. switch (mipi_fmt) {
  600. case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
  601. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
  602. case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
  603. case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
  604. default: return VID_DST_FORMAT_RGB888;
  605. }
  606. }
  607. static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
  608. const enum mipi_dsi_pixel_format mipi_fmt)
  609. {
  610. switch (mipi_fmt) {
  611. case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
  612. case MIPI_DSI_FMT_RGB666_PACKED:
  613. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
  614. case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
  615. default: return CMD_DST_FORMAT_RGB888;
  616. }
  617. }
  618. static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
  619. u32 clk_pre, u32 clk_post)
  620. {
  621. u32 flags = msm_host->mode_flags;
  622. enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
  623. u32 data = 0;
  624. if (!enable) {
  625. dsi_write(msm_host, REG_DSI_CTRL, 0);
  626. return;
  627. }
  628. if (flags & MIPI_DSI_MODE_VIDEO) {
  629. if (flags & MIPI_DSI_MODE_VIDEO_HSE)
  630. data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
  631. if (flags & MIPI_DSI_MODE_VIDEO_HFP)
  632. data |= DSI_VID_CFG0_HFP_POWER_STOP;
  633. if (flags & MIPI_DSI_MODE_VIDEO_HBP)
  634. data |= DSI_VID_CFG0_HBP_POWER_STOP;
  635. if (flags & MIPI_DSI_MODE_VIDEO_HSA)
  636. data |= DSI_VID_CFG0_HSA_POWER_STOP;
  637. /* Always set low power stop mode for BLLP
  638. * to let command engine send packets
  639. */
  640. data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
  641. DSI_VID_CFG0_BLLP_POWER_STOP;
  642. data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
  643. data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
  644. data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
  645. dsi_write(msm_host, REG_DSI_VID_CFG0, data);
  646. /* Do not swap RGB colors */
  647. data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
  648. dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
  649. } else {
  650. /* Do not swap RGB colors */
  651. data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
  652. data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
  653. dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
  654. data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
  655. DSI_CMD_CFG1_WR_MEM_CONTINUE(
  656. MIPI_DCS_WRITE_MEMORY_CONTINUE);
  657. /* Always insert DCS command */
  658. data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
  659. dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
  660. }
  661. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
  662. DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
  663. DSI_CMD_DMA_CTRL_LOW_POWER);
  664. data = 0;
  665. /* Always assume dedicated TE pin */
  666. data |= DSI_TRIG_CTRL_TE;
  667. data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
  668. data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
  669. data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
  670. if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
  671. (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
  672. data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
  673. dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
  674. data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
  675. DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
  676. dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
  677. data = 0;
  678. if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
  679. data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
  680. dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
  681. /* allow only ack-err-status to generate interrupt */
  682. dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
  683. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
  684. dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
  685. data = DSI_CTRL_CLK_EN;
  686. DBG("lane number=%d", msm_host->lanes);
  687. if (msm_host->lanes == 2) {
  688. data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
  689. /* swap lanes for 2-lane panel for better performance */
  690. dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
  691. DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
  692. } else {
  693. /* Take 4 lanes as default */
  694. data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
  695. DSI_CTRL_LANE3;
  696. /* Do not swap lanes for 4-lane panel */
  697. dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
  698. DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
  699. }
  700. if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
  701. dsi_write(msm_host, REG_DSI_LANE_CTRL,
  702. DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
  703. data |= DSI_CTRL_ENABLE;
  704. dsi_write(msm_host, REG_DSI_CTRL, data);
  705. }
/*
 * dsi_timing_setup() - program the display timing into the controller.
 *
 * Video mode: converts the sync-relative drm_display_mode timings into
 * the hardware's start/end register layout.  Sync start is taken as 0,
 * so active start = total - sync_start.
 * Command mode: programs the MDP stream word count and stream totals;
 * each line is sent as a DCS long write of pixel data plus the one-byte
 * write_memory_start command.
 */
static void dsi_timing_setup(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
	u32 h_total = mode->htotal;
	u32 v_total = mode->vtotal;
	u32 hs_end = mode->hsync_end - mode->hsync_start;
	u32 vs_end = mode->vsync_end - mode->vsync_start;
	u32 ha_start = h_total - mode->hsync_start;
	u32 ha_end = ha_start + mode->hdisplay;
	u32 va_start = v_total - mode->vsync_start;
	u32 va_end = va_start + mode->vdisplay;
	u32 wc;

	DBG("");

	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
		dsi_write(msm_host, REG_DSI_ACTIVE_H,
			DSI_ACTIVE_H_START(ha_start) |
			DSI_ACTIVE_H_END(ha_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_V,
			DSI_ACTIVE_V_START(va_start) |
			DSI_ACTIVE_V_END(va_end));
		/* totals are programmed as N-1 */
		dsi_write(msm_host, REG_DSI_TOTAL,
			DSI_TOTAL_H_TOTAL(h_total - 1) |
			DSI_TOTAL_V_TOTAL(v_total - 1));

		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
			DSI_ACTIVE_HSYNC_START(hs_start) |
			DSI_ACTIVE_HSYNC_END(hs_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
	} else {		/* command mode */
		/* image data and 1 byte write_memory_start cmd */
		wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
					msm_host->channel) |
			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
					MIPI_DSI_DCS_LONG_WRITE));

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
	}
}
/*
 * Soft-reset the DSI controller.  Clocks are force-enabled first because
 * the controller can only be reset while its clocks are running; the
 * wmb()s enforce the required write ordering.
 */
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* clocks need to be enabled before reset */

	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb(); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
}
  759. static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
  760. bool video_mode, bool enable)
  761. {
  762. u32 dsi_ctrl;
  763. dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
  764. if (!enable) {
  765. dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
  766. DSI_CTRL_CMD_MODE_EN);
  767. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
  768. DSI_IRQ_MASK_VIDEO_DONE, 0);
  769. } else {
  770. if (video_mode) {
  771. dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
  772. } else { /* command mode */
  773. dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
  774. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
  775. }
  776. dsi_ctrl |= DSI_CTRL_ENABLE;
  777. }
  778. dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
  779. }
  780. static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
  781. {
  782. u32 data;
  783. data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
  784. if (mode == 0)
  785. data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
  786. else
  787. data |= DSI_CMD_DMA_CTRL_LOW_POWER;
  788. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
  789. }
/*
 * Wait (up to 70 ms, best effort) for the next video-frame-done
 * interrupt.  The timeout result is deliberately ignored: callers only
 * use this to line command transfers up with the BLLP window.
 */
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);

	reinit_completion(&msm_host->video_comp);

	wait_for_completion_timeout(&msm_host->video_comp,
			msecs_to_jiffies(70));

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
  798. static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
  799. {
  800. if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
  801. return;
  802. if (msm_host->power_on) {
  803. dsi_wait4video_done(msm_host);
  804. /* delay 4 ms to skip BLLP */
  805. usleep_range(2000, 4000);
  806. }
  807. }
  808. /* dsi_cmd */
  809. static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
  810. {
  811. struct drm_device *dev = msm_host->dev;
  812. int ret;
  813. u32 iova;
  814. mutex_lock(&dev->struct_mutex);
  815. msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
  816. if (IS_ERR(msm_host->tx_gem_obj)) {
  817. ret = PTR_ERR(msm_host->tx_gem_obj);
  818. pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
  819. msm_host->tx_gem_obj = NULL;
  820. mutex_unlock(&dev->struct_mutex);
  821. return ret;
  822. }
  823. ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
  824. if (ret) {
  825. pr_err("%s: failed to get iova, %d\n", __func__, ret);
  826. return ret;
  827. }
  828. mutex_unlock(&dev->struct_mutex);
  829. if (iova & 0x07) {
  830. pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
  831. return -EINVAL;
  832. }
  833. return 0;
  834. }
  835. static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
  836. {
  837. struct drm_device *dev = msm_host->dev;
  838. if (msm_host->tx_gem_obj) {
  839. msm_gem_put_iova(msm_host->tx_gem_obj, 0);
  840. mutex_lock(&dev->struct_mutex);
  841. msm_gem_free_object(msm_host->tx_gem_obj);
  842. msm_host->tx_gem_obj = NULL;
  843. mutex_unlock(&dev->struct_mutex);
  844. }
  845. }
/*
 * prepare cmd buffer to be txed
 *
 * Packs @msg into the TX GEM buffer in the MSM-specific DMA layout: the
 * standard 4-byte DSI header is reordered to WC-low, WC-high, data-type,
 * flags, where the flags byte marks last-packet (bit 7), long-packet
 * (bit 6) and read-request (bit 5).  Any payload follows, and the buffer
 * is padded with 0xff up to a 4-byte boundary.
 *
 * Returns the padded length on success or a negative errno.
 */
static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
			const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet packet;
	int len;
	int ret;
	u8 *data;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret) {
		pr_err("%s: create packet failed, %d\n", __func__, ret);
		return ret;
	}

	/* round up to a dword boundary for the DMA engine */
	len = (packet.size + 3) & (~0x3);

	if (len > tx_gem->size) {
		pr_err("%s: packet size is too big\n", __func__);
		return -EINVAL;
	}

	data = msm_gem_vaddr(tx_gem);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		pr_err("%s: get vaddr failed, %d\n", __func__, ret);
		return ret;
	}

	/* MSM specific command format in memory */
	data[0] = packet.header[1];
	data[1] = packet.header[2];
	data[2] = packet.header[0];
	data[3] = BIT(7); /* Last packet */
	if (mipi_dsi_packet_format_is_long(msg->type))
		data[3] |= BIT(6);
	if (msg->rx_buf && msg->rx_len)
		data[3] |= BIT(5);

	/* Long packet */
	if (packet.payload && packet.payload_length)
		memcpy(data + 4, packet.payload, packet.payload_length);

	/* Append 0xff to the end */
	if (packet.size < len)
		memset(data + packet.size, 0xff, len - packet.size);

	return len;
}
  889. /*
  890. * dsi_short_read1_resp: 1 parameter
  891. */
  892. static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  893. {
  894. u8 *data = msg->rx_buf;
  895. if (data && (msg->rx_len >= 1)) {
  896. *data = buf[1]; /* strip out dcs type */
  897. return 1;
  898. } else {
  899. pr_err("%s: read data does not match with rx_buf len %zu\n",
  900. __func__, msg->rx_len);
  901. return -EINVAL;
  902. }
  903. }
  904. /*
  905. * dsi_short_read2_resp: 2 parameter
  906. */
  907. static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  908. {
  909. u8 *data = msg->rx_buf;
  910. if (data && (msg->rx_len >= 2)) {
  911. data[0] = buf[1]; /* strip out dcs type */
  912. data[1] = buf[2];
  913. return 2;
  914. } else {
  915. pr_err("%s: read data does not match with rx_buf len %zu\n",
  916. __func__, msg->rx_len);
  917. return -EINVAL;
  918. }
  919. }
/*
 * dsi_long_read_resp: copy a long read response payload into msg->rx_buf,
 * skipping the 4-byte DCS packet header at the start of @buf.
 * Returns the number of bytes copied (msg->rx_len).
 */
static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
	/* strip out 4 byte dcs header */
	if (msg->rx_buf && msg->rx_len)
		memcpy(msg->rx_buf, buf + 4, msg->rx_len);

	return msg->rx_len;
}
/*
 * Kick off a DMA transfer of @len bytes from the TX GEM buffer.  In
 * video mode the start is deferred to the BLLP window via
 * dsi_wait4video_eng_busy().  When the manager triggers the DMA on this
 * host, wait up to 200 ms for the dma-done completion.
 *
 * Returns @len on success, -ETIMEDOUT if the completion never came, or
 * a negative errno if the iova lookup failed.
 */
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
	int ret;
	u32 iova;
	bool triggered;

	ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
	if (ret) {
		pr_err("%s: failed to get iova: %d\n", __func__, ret);
		return ret;
	}

	reinit_completion(&msm_host->dma_comp);

	dsi_wait4video_eng_busy(msm_host);

	/* the manager decides which host actually fires the trigger */
	triggered = msm_dsi_manager_cmd_xfer_trigger(
						msm_host->id, iova, len);
	if (triggered) {
		ret = wait_for_completion_timeout(&msm_host->dma_comp,
					msecs_to_jiffies(200));
		DBG("ret=%d", ret);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else
			ret = len;
	} else
		ret = len;

	return ret;
}
/*
 * Copy up to @rx_byte bytes of response data from the RDBK_DATA
 * registers into @buf.  The hardware holds at most 16 bytes
 * (4 x 32-bit registers); for longer reads the caller re-reads the
 * panel with a growing @pkt_size and the bytes already delivered are
 * skipped via @repeated_bytes.  Returns the number of bytes stored.
 */
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
			u8 *buf, int rx_byte, int pkt_size)
{
	u32 *lp, *temp, data;
	int i, j = 0, cnt;
	u32 read_cnt;
	u8 reg[16];
	int repeated_bytes = 0;
	int buf_offset = buf - msm_host->rx_buf;

	/* NOTE(review): lp is computed but never used afterwards */
	lp = (u32 *)buf;
	temp = (u32 *)reg;
	cnt = (rx_byte + 3) >> 2;
	if (cnt > 4)
		cnt = 4; /* 4 x 32 bits registers only */

	/* a 4-byte (short) response fits in one register read */
	if (rx_byte == 4)
		read_cnt = 4;
	else
		read_cnt = pkt_size + 6;

	/*
	 * In case of multiple reads from the panel, after the first read, there
	 * is possibility that there are some bytes in the payload repeating in
	 * the RDBK_DATA registers. Since we read all the parameters from the
	 * panel right from the first byte for every pass. We need to skip the
	 * repeating bytes and then append the new parameters to the rx buffer.
	 */
	if (read_cnt > 16) {
		int bytes_shifted;
		/* Any data more than 16 bytes will be shifted out.
		 * The temp read buffer should already contain these bytes.
		 * The remaining bytes in read buffer are the repeated bytes.
		 */
		bytes_shifted = read_cnt - 16;
		repeated_bytes = buf_offset - bytes_shifted;
	}

	/* registers are read highest index first; data is big-endian */
	for (i = cnt - 1; i >= 0; i--) {
		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
		*temp++ = ntohl(data); /* to host byte order */
		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
	}

	/* append only the fresh bytes to the caller's buffer */
	for (i = repeated_bytes; i < 16; i++)
		buf[j++] = reg[i];

	return j;
}
  996. static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
  997. const struct mipi_dsi_msg *msg)
  998. {
  999. int len, ret;
  1000. int bllp_len = msm_host->mode->hdisplay *
  1001. dsi_get_bpp(msm_host->format) / 8;
  1002. len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
  1003. if (!len) {
  1004. pr_err("%s: failed to add cmd type = 0x%x\n",
  1005. __func__, msg->type);
  1006. return -EINVAL;
  1007. }
  1008. /* for video mode, do not send cmds more than
  1009. * one pixel line, since it only transmit it
  1010. * during BLLP.
  1011. */
  1012. /* TODO: if the command is sent in LP mode, the bit rate is only
  1013. * half of esc clk rate. In this case, if the video is already
  1014. * actively streaming, we need to check more carefully if the
  1015. * command can be fit into one BLLP.
  1016. */
  1017. if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
  1018. pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
  1019. __func__, len);
  1020. return -EINVAL;
  1021. }
  1022. ret = dsi_cmd_dma_tx(msm_host, len);
  1023. if (ret < len) {
  1024. pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
  1025. __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
  1026. return -ECOMM;
  1027. }
  1028. return len;
  1029. }
/*
 * Recover the controller after an error (e.g. MDP FIFO underflow):
 * disable it, force the clocks on, pulse the soft reset, then restore
 * the saved DSI_CTRL value.  The wmb()s enforce the required ordering
 * of the register writes.
 */
static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
{
	u32 data0, data1;

	data0 = dsi_read(msm_host, REG_DSI_CTRL);
	data1 = data0;
	data1 &= ~DSI_CTRL_ENABLE;
	dsi_write(msm_host, REG_DSI_CTRL, data1);
	/*
	 * dsi controller need to be disabled before
	 * clocks turned on
	 */
	wmb();

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb();	/* make sure clocks enabled */

	/* dsi controller can only be reset while clocks are running */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb();	/* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb();	/* controller out of reset */
	dsi_write(msm_host, REG_DSI_CTRL, data0);
	wmb();	/* make sure dsi controller enabled again */
}
/*
 * Deferred error handler.  Runs with the error interrupt masked (done by
 * dsi_error()): recovers from an MDP FIFO underflow with a soft reset if
 * needed, clears the recorded error state and re-enables the error
 * interrupt.
 */
static void dsi_err_worker(struct work_struct *work)
{
	struct msm_dsi_host *msm_host =
		container_of(work, struct msm_dsi_host, err_work);
	u32 status = msm_host->err_work_state;

	pr_err_ratelimited("%s: status=%x\n", __func__, status);
	if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
		dsi_sw_reset_restore(msm_host);

	/* It is safe to clear here because error irq is disabled. */
	msm_host->err_work_state = 0;

	/* enable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
}
  1065. static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
  1066. {
  1067. u32 status;
  1068. status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
  1069. if (status) {
  1070. dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
  1071. /* Writing of an extra 0 needed to clear error bits */
  1072. dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
  1073. msm_host->err_work_state |= DSI_ERR_STATE_ACK;
  1074. }
  1075. }
  1076. static void dsi_timeout_status(struct msm_dsi_host *msm_host)
  1077. {
  1078. u32 status;
  1079. status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
  1080. if (status) {
  1081. dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
  1082. msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
  1083. }
  1084. }
/*
 * Latch lane-0 PHY errors (escape-mode and LP contention) and flag them
 * for the error worker.
 */
static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
{
	u32 status;
	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);

	if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
			DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
	}
}
  1098. static void dsi_fifo_status(struct msm_dsi_host *msm_host)
  1099. {
  1100. u32 status;
  1101. status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
  1102. /* fifo underflow, overflow */
  1103. if (status) {
  1104. dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
  1105. msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
  1106. if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
  1107. msm_host->err_work_state |=
  1108. DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
  1109. }
  1110. }
  1111. static void dsi_status(struct msm_dsi_host *msm_host)
  1112. {
  1113. u32 status;
  1114. status = dsi_read(msm_host, REG_DSI_STATUS0);
  1115. if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
  1116. dsi_write(msm_host, REG_DSI_STATUS0, status);
  1117. msm_host->err_work_state |=
  1118. DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
  1119. }
  1120. }
  1121. static void dsi_clk_status(struct msm_dsi_host *msm_host)
  1122. {
  1123. u32 status;
  1124. status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
  1125. if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
  1126. dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
  1127. msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
  1128. }
  1129. }
/*
 * Collect all error status registers into err_work_state and defer
 * recovery to the error worker.  The error interrupt is masked here and
 * re-enabled by dsi_err_worker() after the state has been consumed.
 */
static void dsi_error(struct msm_dsi_host *msm_host)
{
	/* disable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);

	dsi_clk_status(msm_host);
	dsi_fifo_status(msm_host);
	dsi_ack_err_status(msm_host);
	dsi_timeout_status(msm_host);
	dsi_status(msm_host);
	dsi_dln0_phy_err(msm_host);

	queue_work(msm_host->workqueue, &msm_host->err_work);
}
/*
 * DSI controller interrupt handler: read and ack all pending interrupt
 * bits, then dispatch to the error path and the video-done/dma-done
 * completion waiters.
 */
static irqreturn_t dsi_host_irq(int irq, void *ptr)
{
	struct msm_dsi_host *msm_host = ptr;
	u32 isr;
	unsigned long flags;

	/* registers not mapped yet: nothing to do */
	if (!msm_host->ctrl_base)
		return IRQ_HANDLED;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
	/* writing the value back acks the interrupts we just read */
	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);

	DBG("isr=0x%x, id=%d", isr, msm_host->id);

	if (isr & DSI_IRQ_ERROR)
		dsi_error(msm_host);

	if (isr & DSI_IRQ_VIDEO_DONE)
		complete(&msm_host->video_comp);

	if (isr & DSI_IRQ_CMD_DMA_DONE)
		complete(&msm_host->dma_comp);

	return IRQ_HANDLED;
}
/*
 * Claim the optional panel GPIOs (display-enable and tearing-effect)
 * declared in the panel's DT node.  Absent GPIOs yield NULL descriptors
 * and are not an error; only a real lookup failure is returned.
 */
static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
	struct device *panel_device)
{
	msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
							 "disp-enable",
							 GPIOD_OUT_LOW);
	if (IS_ERR(msm_host->disp_en_gpio)) {
		DBG("cannot get disp-enable-gpios %ld",
			PTR_ERR(msm_host->disp_en_gpio));
		return PTR_ERR(msm_host->disp_en_gpio);
	}

	msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
			GPIOD_IN);
	if (IS_ERR(msm_host->te_gpio)) {
		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
		return PTR_ERR(msm_host->te_gpio);
	}

	return 0;
}
/*
 * mipi_dsi_host_ops.attach: a panel/bridge device has bound to this
 * host.  Cache its link parameters, claim its optional GPIOs and kick a
 * hotplug event so the connector gets re-probed.
 */
static int dsi_host_attach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	msm_host->channel = dsi->channel;
	msm_host->lanes = dsi->lanes;
	msm_host->format = dsi->format;
	msm_host->mode_flags = dsi->mode_flags;

	/* the attached device should match the node found in the DT graph */
	WARN_ON(dsi->dev.of_node != msm_host->device_node);

	/* Some gpios defined in panel DT need to be controlled by host */
	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
	if (ret)
		return ret;

	DBG("id=%d", msm_host->id);
	if (msm_host->dev)
		drm_helper_hpd_irq_event(msm_host->dev);

	return 0;
}
  1200. static int dsi_host_detach(struct mipi_dsi_host *host,
  1201. struct mipi_dsi_device *dsi)
  1202. {
  1203. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1204. msm_host->device_node = NULL;
  1205. DBG("id=%d", msm_host->id);
  1206. if (msm_host->dev)
  1207. drm_helper_hpd_irq_event(msm_host->dev);
  1208. return 0;
  1209. }
  1210. static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
  1211. const struct mipi_dsi_msg *msg)
  1212. {
  1213. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1214. int ret;
  1215. if (!msg || !msm_host->power_on)
  1216. return -EINVAL;
  1217. mutex_lock(&msm_host->cmd_mutex);
  1218. ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
  1219. mutex_unlock(&msm_host->cmd_mutex);
  1220. return ret;
  1221. }
  1222. static struct mipi_dsi_host_ops dsi_host_ops = {
  1223. .attach = dsi_host_attach,
  1224. .detach = dsi_host_detach,
  1225. .transfer = dsi_host_transfer,
  1226. };
  1227. static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
  1228. {
  1229. struct device *dev = &msm_host->pdev->dev;
  1230. struct device_node *np = dev->of_node;
  1231. struct device_node *endpoint, *device_node;
  1232. int ret;
  1233. ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
  1234. if (ret) {
  1235. dev_err(dev, "%s: host index not specified, ret=%d\n",
  1236. __func__, ret);
  1237. return ret;
  1238. }
  1239. /*
  1240. * Get the first endpoint node. In our case, dsi has one output port
  1241. * to which the panel is connected. Don't return an error if a port
  1242. * isn't defined. It's possible that there is nothing connected to
  1243. * the dsi output.
  1244. */
  1245. endpoint = of_graph_get_next_endpoint(np, NULL);
  1246. if (!endpoint) {
  1247. dev_dbg(dev, "%s: no endpoint\n", __func__);
  1248. return 0;
  1249. }
  1250. /* Get panel node from the output port's endpoint data */
  1251. device_node = of_graph_get_remote_port_parent(endpoint);
  1252. if (!device_node) {
  1253. dev_err(dev, "%s: no valid device\n", __func__);
  1254. of_node_put(endpoint);
  1255. return -ENODEV;
  1256. }
  1257. of_node_put(endpoint);
  1258. of_node_put(device_node);
  1259. msm_host->device_node = device_node;
  1260. return 0;
  1261. }
  1262. int msm_dsi_host_init(struct msm_dsi *msm_dsi)
  1263. {
  1264. struct msm_dsi_host *msm_host = NULL;
  1265. struct platform_device *pdev = msm_dsi->pdev;
  1266. int ret;
  1267. msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
  1268. if (!msm_host) {
  1269. pr_err("%s: FAILED: cannot alloc dsi host\n",
  1270. __func__);
  1271. ret = -ENOMEM;
  1272. goto fail;
  1273. }
  1274. msm_host->pdev = pdev;
  1275. ret = dsi_host_parse_dt(msm_host);
  1276. if (ret) {
  1277. pr_err("%s: failed to parse dt\n", __func__);
  1278. goto fail;
  1279. }
  1280. ret = dsi_clk_init(msm_host);
  1281. if (ret) {
  1282. pr_err("%s: unable to initialize dsi clks\n", __func__);
  1283. goto fail;
  1284. }
  1285. msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
  1286. if (IS_ERR(msm_host->ctrl_base)) {
  1287. pr_err("%s: unable to map Dsi ctrl base\n", __func__);
  1288. ret = PTR_ERR(msm_host->ctrl_base);
  1289. goto fail;
  1290. }
  1291. msm_host->cfg = dsi_get_config(msm_host);
  1292. if (!msm_host->cfg) {
  1293. ret = -EINVAL;
  1294. pr_err("%s: get config failed\n", __func__);
  1295. goto fail;
  1296. }
  1297. ret = dsi_regulator_init(msm_host);
  1298. if (ret) {
  1299. pr_err("%s: regulator init failed\n", __func__);
  1300. goto fail;
  1301. }
  1302. msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
  1303. if (!msm_host->rx_buf) {
  1304. pr_err("%s: alloc rx temp buf failed\n", __func__);
  1305. goto fail;
  1306. }
  1307. init_completion(&msm_host->dma_comp);
  1308. init_completion(&msm_host->video_comp);
  1309. mutex_init(&msm_host->dev_mutex);
  1310. mutex_init(&msm_host->cmd_mutex);
  1311. mutex_init(&msm_host->clk_mutex);
  1312. spin_lock_init(&msm_host->intr_lock);
  1313. /* setup workqueue */
  1314. msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
  1315. INIT_WORK(&msm_host->err_work, dsi_err_worker);
  1316. msm_dsi->host = &msm_host->base;
  1317. msm_dsi->id = msm_host->id;
  1318. DBG("Dsi Host %d initialized", msm_host->id);
  1319. return 0;
  1320. fail:
  1321. return ret;
  1322. }
  1323. void msm_dsi_host_destroy(struct mipi_dsi_host *host)
  1324. {
  1325. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1326. DBG("");
  1327. dsi_tx_buf_free(msm_host);
  1328. if (msm_host->workqueue) {
  1329. flush_workqueue(msm_host->workqueue);
  1330. destroy_workqueue(msm_host->workqueue);
  1331. msm_host->workqueue = NULL;
  1332. }
  1333. mutex_destroy(&msm_host->clk_mutex);
  1334. mutex_destroy(&msm_host->cmd_mutex);
  1335. mutex_destroy(&msm_host->dev_mutex);
  1336. }
  1337. int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
  1338. struct drm_device *dev)
  1339. {
  1340. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1341. struct platform_device *pdev = msm_host->pdev;
  1342. int ret;
  1343. msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
  1344. if (msm_host->irq < 0) {
  1345. ret = msm_host->irq;
  1346. dev_err(dev->dev, "failed to get irq: %d\n", ret);
  1347. return ret;
  1348. }
  1349. ret = devm_request_irq(&pdev->dev, msm_host->irq,
  1350. dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
  1351. "dsi_isr", msm_host);
  1352. if (ret < 0) {
  1353. dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
  1354. msm_host->irq, ret);
  1355. return ret;
  1356. }
  1357. msm_host->dev = dev;
  1358. ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
  1359. if (ret) {
  1360. pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
  1361. return ret;
  1362. }
  1363. return 0;
  1364. }
  1365. int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
  1366. {
  1367. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1368. int ret;
  1369. /* Register mipi dsi host */
  1370. if (!msm_host->registered) {
  1371. host->dev = &msm_host->pdev->dev;
  1372. host->ops = &dsi_host_ops;
  1373. ret = mipi_dsi_host_register(host);
  1374. if (ret)
  1375. return ret;
  1376. msm_host->registered = true;
  1377. /* If the panel driver has not been probed after host register,
  1378. * we should defer the host's probe.
  1379. * It makes sure panel is connected when fbcon detects
  1380. * connector status and gets the proper display mode to
  1381. * create framebuffer.
  1382. * Don't try to defer if there is nothing connected to the dsi
  1383. * output
  1384. */
  1385. if (check_defer && msm_host->device_node) {
  1386. if (!of_drm_find_panel(msm_host->device_node))
  1387. if (!of_drm_find_bridge(msm_host->device_node))
  1388. return -EPROBE_DEFER;
  1389. }
  1390. }
  1391. return 0;
  1392. }
  1393. void msm_dsi_host_unregister(struct mipi_dsi_host *host)
  1394. {
  1395. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1396. if (msm_host->registered) {
  1397. mipi_dsi_host_unregister(host);
  1398. host->dev = NULL;
  1399. host->ops = NULL;
  1400. msm_host->registered = false;
  1401. }
  1402. }
/*
 * Prepare the host for a command transfer: enable clocks, switch to HS
 * power mode unless the message asked for LP, save DSI_CTRL so it can
 * be restored afterwards, force command-mode + enable, and unmask the
 * dma-done interrupt.  Paired with msm_dsi_host_xfer_restore().
 */
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	/* TODO: make sure dsi_cmd_mdp is idle.
	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
	 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
	 * How to handle the old versions? Wait for mdp cmd done?
	 */

	/*
	 * mdss interrupt is generated in mdp core clock domain
	 * mdp clock need to be enabled to receive dsi interrupt
	 */
	dsi_clk_ctrl(msm_host, 1);

	/* TODO: vote for bus bandwidth */

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(0, msm_host);

	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
	dsi_write(msm_host, REG_DSI_CTRL,
		msm_host->dma_cmd_ctrl_restore |
		DSI_CTRL_CMD_MODE_EN |
		DSI_CTRL_ENABLE);
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);

	return 0;
}
/*
 * Undo msm_dsi_host_xfer_prepare(): mask the dma-done interrupt,
 * restore the saved DSI_CTRL value, drop back to LP power mode if the
 * message was sent in HS, and release the clock vote.
 */
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(1, msm_host);

	/* TODO: unvote for bus bandwidth */

	dsi_clk_ctrl(msm_host, 0);
}
  1439. int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
  1440. const struct mipi_dsi_msg *msg)
  1441. {
  1442. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1443. return dsi_cmds2buf_tx(msm_host, msg);
  1444. }
/*
 * Read a response from the panel.
 *
 * Short responses (rlen <= 2) need a single pass.  Long reads are split
 * into multiple passes because the RDBK fifo only holds 16 bytes: the
 * first pass carries up to 10 payload bytes, later passes up to 14,
 * with the maximum-return-packet-size resized before each pass.
 * Finally the buffer is decoded according to the DCS response type.
 * Returns the number of payload bytes stored in msg->rx_buf, 0 for
 * ACK/error or unrecognized responses, or a negative errno.
 */
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int data_byte, rx_byte, dlen, end;
	int short_response, diff, pkt_size, ret = 0;
	char cmd;
	int rlen = msg->rx_len;
	u8 *buf;

	if (rlen <= 2) {
		short_response = 1;
		pkt_size = rlen;
		rx_byte = 4;
	} else {
		short_response = 0;
		data_byte = 10;	/* first read */
		if (rlen < data_byte)
			pkt_size = rlen;
		else
			pkt_size = data_byte;
		rx_byte = data_byte + 6; /* 4 header + 2 crc */
	}

	buf = msm_host->rx_buf;
	end = 0;
	while (!end) {
		u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
		struct mipi_dsi_msg max_pkt_size_msg = {
			.channel = msg->channel,
			.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
			.tx_len = 2,
			.tx_buf = tx,
		};

		DBG("rlen=%d pkt_size=%d rx_byte=%d",
			rlen, pkt_size, rx_byte);

		/* tell the panel how much it may return in one packet */
		ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
		if (ret < 2) {
			pr_err("%s: Set max pkt size failed, %d\n",
				__func__, ret);
			return -EINVAL;
		}

		if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
			(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
			/* Clear the RDBK_DATA registers */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
					DSI_RDBK_DATA_CTRL_CLR);
			wmb(); /* make sure the RDBK registers are cleared */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
			wmb(); /* release cleared status before transfer */
		}

		ret = dsi_cmds2buf_tx(msm_host, msg);
		if (ret < msg->tx_len) {
			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
			return ret;
		}

		/*
		 * once cmd_dma_done interrupt received,
		 * return data from client is ready and stored
		 * at RDBK_DATA register already
		 * since rx fifo is 16 bytes, dcs header is kept at first loop,
		 * after that dcs header lost during shift into registers
		 */
		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);

		if (dlen <= 0)
			return 0;

		if (short_response)
			break;

		if (rlen <= data_byte) {
			diff = data_byte - rlen;
			end = 1;
		} else {
			diff = 0;
			rlen -= data_byte;
		}

		if (!end) {
			dlen -= 2; /* 2 crc */
			dlen -= diff;
			buf += dlen;	/* next start position */
			data_byte = 14;	/* NOT first read */
			if (rlen < data_byte)
				pkt_size += rlen;
			else
				pkt_size += data_byte;
			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
		}
	}

	/*
	 * For single Long read, if the requested rlen < 10,
	 * we need to shift the start position of rx
	 * data buffer to skip the bytes which are not
	 * updated.
	 */
	if (pkt_size < 10 && !short_response)
		buf = msm_host->rx_buf + (10 - rlen);
	else
		buf = msm_host->rx_buf;

	/* the first byte identifies the DCS response type */
	cmd = buf[0];
	switch (cmd) {
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
		ret = 0;
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		ret = dsi_short_read1_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		ret = dsi_short_read2_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		ret = dsi_long_read_resp(buf, msg);
		break;
	default:
		pr_warn("%s:Invalid response cmd\n", __func__);
		ret = 0;
	}

	return ret;
}
/*
 * Program the DMA base/length registers and fire the DMA trigger.
 * Called by the manager once it has decided this host performs the
 * transfer.
 */
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);

	/* Make sure trigger happens */
	wmb();
}
  1573. int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
  1574. struct msm_dsi_pll *src_pll)
  1575. {
  1576. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1577. struct clk *byte_clk_provider, *pixel_clk_provider;
  1578. int ret;
  1579. ret = msm_dsi_pll_get_clk_provider(src_pll,
  1580. &byte_clk_provider, &pixel_clk_provider);
  1581. if (ret) {
  1582. pr_info("%s: can't get provider from pll, don't set parent\n",
  1583. __func__);
  1584. return 0;
  1585. }
  1586. ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
  1587. if (ret) {
  1588. pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
  1589. __func__, ret);
  1590. goto exit;
  1591. }
  1592. ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
  1593. if (ret) {
  1594. pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
  1595. __func__, ret);
  1596. goto exit;
  1597. }
  1598. exit:
  1599. return ret;
  1600. }
  1601. int msm_dsi_host_enable(struct mipi_dsi_host *host)
  1602. {
  1603. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1604. dsi_op_mode_config(msm_host,
  1605. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
  1606. /* TODO: clock should be turned off for command mode,
  1607. * and only turned on before MDP START.
  1608. * This part of code should be enabled once mdp driver support it.
  1609. */
  1610. /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
  1611. dsi_clk_ctrl(msm_host, 0); */
  1612. return 0;
  1613. }
  1614. int msm_dsi_host_disable(struct mipi_dsi_host *host)
  1615. {
  1616. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1617. dsi_op_mode_config(msm_host,
  1618. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
  1619. /* Since we have disabled INTF, the video engine won't stop so that
  1620. * the cmd engine will be blocked.
  1621. * Reset to disable video engine so that we can send off cmd.
  1622. */
  1623. dsi_sw_reset(msm_host);
  1624. return 0;
  1625. }
/*
 * Power-on sequence for the DSI host: regulators -> PHY (needs bus clocks
 * temporarily) -> link clocks -> pinctrl -> timing/controller config.
 * Serialized against msm_dsi_host_power_off() by dev_mutex; idempotent if
 * the host is already powered.
 *
 * Returns 0 on success or a negative errno, with all partially acquired
 * resources released via the goto-cleanup chain below.
 */
int msm_dsi_host_power_on(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	u32 clk_pre = 0, clk_post = 0;	/* filled in by PHY enable */
	int ret = 0;

	mutex_lock(&msm_host->dev_mutex);
	if (msm_host->power_on) {
		DBG("dsi host already on");
		goto unlock_ret;
	}

	/* Clock rates derive from the display mode; must succeed first. */
	ret = dsi_calc_clk_rate(msm_host);
	if (ret) {
		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
		goto unlock_ret;
	}

	ret = dsi_host_regulator_enable(msm_host);
	if (ret) {
		pr_err("%s:Failed to enable vregs.ret=%d\n",
			__func__, ret);
		goto unlock_ret;
	}

	/* Bus clocks are only needed transiently for the PHY programming. */
	ret = dsi_bus_clk_enable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
		goto fail_disable_reg;
	}

	dsi_phy_sw_reset(msm_host);
	ret = msm_dsi_manager_phy_enable(msm_host->id,
					msm_host->byte_clk_rate * 8,
					clk_get_rate(msm_host->esc_clk),
					&clk_pre, &clk_post);
	/* Bus clocks off again regardless of PHY result; then check ret. */
	dsi_bus_clk_disable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable phy, %d\n", __func__, ret);
		goto fail_disable_reg;
	}

	ret = dsi_clk_ctrl(msm_host, 1);
	if (ret) {
		pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
		goto fail_disable_reg;
	}

	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
	if (ret) {
		pr_err("%s: failed to set pinctrl default state, %d\n",
			__func__, ret);
		goto fail_disable_clk;
	}

	dsi_timing_setup(msm_host);
	dsi_sw_reset(msm_host);
	/* clk_pre/clk_post come from the PHY enable step above. */
	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);

	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 1);

	msm_host->power_on = true;
	mutex_unlock(&msm_host->dev_mutex);

	return 0;

fail_disable_clk:
	dsi_clk_ctrl(msm_host, 0);
fail_disable_reg:
	dsi_host_regulator_disable(msm_host);
unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return ret;
}
  1689. int msm_dsi_host_power_off(struct mipi_dsi_host *host)
  1690. {
  1691. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1692. mutex_lock(&msm_host->dev_mutex);
  1693. if (!msm_host->power_on) {
  1694. DBG("dsi host already off");
  1695. goto unlock_ret;
  1696. }
  1697. dsi_ctrl_config(msm_host, false, 0, 0);
  1698. if (msm_host->disp_en_gpio)
  1699. gpiod_set_value(msm_host->disp_en_gpio, 0);
  1700. pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
  1701. msm_dsi_manager_phy_disable(msm_host->id);
  1702. dsi_clk_ctrl(msm_host, 0);
  1703. dsi_host_regulator_disable(msm_host);
  1704. DBG("-");
  1705. msm_host->power_on = false;
  1706. unlock_ret:
  1707. mutex_unlock(&msm_host->dev_mutex);
  1708. return 0;
  1709. }
  1710. int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
  1711. struct drm_display_mode *mode)
  1712. {
  1713. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1714. if (msm_host->mode) {
  1715. drm_mode_destroy(msm_host->dev, msm_host->mode);
  1716. msm_host->mode = NULL;
  1717. }
  1718. msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
  1719. if (IS_ERR(msm_host->mode)) {
  1720. pr_err("%s: cannot duplicate mode\n", __func__);
  1721. return PTR_ERR(msm_host->mode);
  1722. }
  1723. return 0;
  1724. }
  1725. struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
  1726. unsigned long *panel_flags)
  1727. {
  1728. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1729. struct drm_panel *panel;
  1730. panel = of_drm_find_panel(msm_host->device_node);
  1731. if (panel_flags)
  1732. *panel_flags = msm_host->mode_flags;
  1733. return panel;
  1734. }
  1735. struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
  1736. {
  1737. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1738. return of_drm_find_bridge(msm_host->device_node);
  1739. }