dsi_host.c 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179
  1. /*
  2. * Copyright (c) 2015, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/clk.h>
  14. #include <linux/delay.h>
  15. #include <linux/err.h>
  16. #include <linux/gpio.h>
  17. #include <linux/gpio/consumer.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/of_device.h>
  20. #include <linux/of_gpio.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/pinctrl/consumer.h>
  23. #include <linux/of_graph.h>
  24. #include <linux/regulator/consumer.h>
  25. #include <linux/spinlock.h>
  26. #include <video/mipi_display.h>
  27. #include "dsi.h"
  28. #include "dsi.xml.h"
  29. #include "dsi_cfg.h"
/*
 * dsi_get_version() - detect the DSI controller generation from its registers.
 * @base:  ioremapped controller register base
 * @major: out: major version (one of MSM_DSI_VER_MAJOR_*)
 * @minor: out: minor version (6G hardware revision register, or 0 for
 *         DSIv2-and-older hosts)
 *
 * Return: 0 on success, -EINVAL on NULL output pointers or an
 * unrecognized version value.
 */
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
	u32 ver;

	if (!major || !minor)
		return -EINVAL;

	/*
	 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
	 * makes all other registers 4-byte shifted down.
	 *
	 * In order to identify between DSI6G(v3) and beyond, and DSIv2 and
	 * older, we read the DSI_VERSION register without any shift(offset
	 * 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
	 * the case of DSI6G, this has to be zero (the offset points to a
	 * scratch register which we never touch)
	 */
	ver = msm_readl(base + REG_DSI_VERSION);
	if (ver) {
		/* older dsi host, there is no register shift */
		ver = FIELD(ver, DSI_VERSION_MAJOR);
		if (ver <= MSM_DSI_VER_MAJOR_V2) {
			/* old versions */
			*major = ver;
			*minor = 0;
			return 0;
		} else {
			return -EINVAL;
		}
	} else {
		/*
		 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
		 * registers are shifted down, read DSI_VERSION again with
		 * the shifted offset
		 */
		ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
		ver = FIELD(ver, DSI_VERSION_MAJOR);
		if (ver == MSM_DSI_VER_MAJOR_6G) {
			/* 6G version */
			*major = ver;
			*minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
			return 0;
		} else {
			return -EINVAL;
		}
	}
}
  75. #define DSI_ERR_STATE_ACK 0x0000
  76. #define DSI_ERR_STATE_TIMEOUT 0x0001
  77. #define DSI_ERR_STATE_DLN0_PHY 0x0002
  78. #define DSI_ERR_STATE_FIFO 0x0004
  79. #define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
  80. #define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
  81. #define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
  82. #define DSI_CLK_CTRL_ENABLE_CLKS \
  83. (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
  84. DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
  85. DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
  86. DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
/* Per-controller state for one MSM DSI host instance. */
struct msm_dsi_host {
	struct mipi_dsi_host base;	/* generic DSI host, used for upcasts */

	struct platform_device *pdev;
	struct drm_device *dev;

	int id;				/* controller index */

	void __iomem *ctrl_base;	/* ioremapped register base */
	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
	struct clk *bus_clks[DSI_BUS_CLK_MAX];

	/* link clocks common to all controller generations */
	struct clk *byte_clk;
	struct clk *esc_clk;
	struct clk *pixel_clk;
	struct clk *byte_clk_src;
	struct clk *pixel_clk_src;

	u32 byte_clk_rate;		/* Hz, computed in dsi_calc_clk_rate() */
	u32 esc_clk_rate;		/* Hz */

	/* DSI v2 specific clocks */
	struct clk *src_clk;
	struct clk *esc_clk_src;
	struct clk *dsi_clk_src;

	u32 src_clk_rate;		/* Hz (v2 only) */

	struct gpio_desc *disp_en_gpio;
	struct gpio_desc *te_gpio;

	const struct msm_dsi_cfg_handler *cfg_hnd;	/* version-specific config */

	struct completion dma_comp;
	struct completion video_comp;
	struct mutex dev_mutex;
	struct mutex cmd_mutex;
	struct mutex clk_mutex;		/* serializes dsi_clk_ctrl() */
	spinlock_t intr_lock; /* Protect interrupt ctrl register */
	u32 err_work_state;		/* DSI_ERR_STATE_* bits for err_work */
	struct work_struct err_work;
	struct workqueue_struct *workqueue;

	/* DSI 6G TX buffer*/
	struct drm_gem_object *tx_gem_obj;

	/* DSI v2 TX buffer */
	void *tx_buf;
	dma_addr_t tx_buf_paddr;

	int tx_size;

	u8 *rx_buf;

	struct drm_display_mode *mode;

	/* connected device info */
	struct device_node *device_node;
	unsigned int channel;		/* DSI virtual channel */
	unsigned int lanes;		/* number of data lanes */
	enum mipi_dsi_pixel_format format;
	unsigned long mode_flags;	/* MIPI_DSI_MODE_* flags */

	u32 dma_cmd_ctrl_restore;

	bool registered;
	bool power_on;
	int irq;
};
  138. static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
  139. {
  140. switch (fmt) {
  141. case MIPI_DSI_FMT_RGB565: return 16;
  142. case MIPI_DSI_FMT_RGB666_PACKED: return 18;
  143. case MIPI_DSI_FMT_RGB666:
  144. case MIPI_DSI_FMT_RGB888:
  145. default: return 24;
  146. }
  147. }
/* Read a 32-bit controller register at offset @reg from ctrl_base. */
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
	return msm_readl(msm_host->ctrl_base + reg);
}
/* Write @data to the 32-bit controller register at offset @reg. */
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
	msm_writel(data, msm_host->ctrl_base + reg);
}
  156. static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
  157. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
/*
 * dsi_get_config() - probe the controller version and look up its config.
 *
 * Temporarily powers the "gdsc" supply and the interface clock just long
 * enough to read the version registers, then releases everything in reverse
 * order via the goto chain below.
 *
 * Return: the matching cfg handler, or NULL on any failure (cfg_hnd stays
 * NULL on every error path).
 */
static const struct msm_dsi_cfg_handler *dsi_get_config(
	struct msm_dsi_host *msm_host)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
	struct device *dev = &msm_host->pdev->dev;
	struct regulator *gdsc_reg;
	struct clk *ahb_clk;
	int ret;
	u32 major = 0, minor = 0;

	gdsc_reg = regulator_get(dev, "gdsc");
	if (IS_ERR(gdsc_reg)) {
		pr_err("%s: cannot get gdsc\n", __func__);
		goto exit;
	}

	ahb_clk = clk_get(dev, "iface_clk");
	if (IS_ERR(ahb_clk)) {
		pr_err("%s: cannot get interface clock\n", __func__);
		goto put_gdsc;
	}

	ret = regulator_enable(gdsc_reg);
	if (ret) {
		pr_err("%s: unable to enable gdsc\n", __func__);
		goto put_clk;
	}

	ret = clk_prepare_enable(ahb_clk);
	if (ret) {
		pr_err("%s: unable to enable ahb_clk\n", __func__);
		goto disable_gdsc;
	}

	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
	if (ret) {
		pr_err("%s: Invalid version\n", __func__);
		goto disable_clks;
	}

	cfg_hnd = msm_dsi_cfg_get(major, minor);

	DBG("%s: Version %x:%x\n", __func__, major, minor);

/* unwind in strict reverse order of acquisition */
disable_clks:
	clk_disable_unprepare(ahb_clk);
disable_gdsc:
	regulator_disable(gdsc_reg);
put_clk:
	clk_put(ahb_clk);
put_gdsc:
	regulator_put(gdsc_reg);
exit:
	return cfg_hnd;
}
/* Upcast from the embedded generic mipi_dsi_host to the msm wrapper. */
static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
{
	return container_of(host, struct msm_dsi_host, base);
}
  209. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
  210. {
  211. struct regulator_bulk_data *s = msm_host->supplies;
  212. const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
  213. int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
  214. int i;
  215. DBG("");
  216. for (i = num - 1; i >= 0; i--)
  217. if (regs[i].disable_load >= 0)
  218. regulator_set_load(s[i].consumer,
  219. regs[i].disable_load);
  220. regulator_bulk_disable(num, s);
  221. }
/*
 * dsi_host_regulator_enable() - apply load requests and enable all supplies.
 *
 * On any failure, load settings already applied are rolled back to their
 * disable_load values before returning.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
{
	struct regulator_bulk_data *s = msm_host->supplies;
	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
	int ret, i;

	DBG("");
	for (i = 0; i < num; i++) {
		/* a negative enable_load means: leave the load untouched */
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
						regs[i].enable_load);
			if (ret < 0) {
				pr_err("regulator %d set op mode failed, %d\n",
					i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		pr_err("regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	/* roll back load requests for entries [0, i) */
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);
	return ret;
}
  251. static int dsi_regulator_init(struct msm_dsi_host *msm_host)
  252. {
  253. struct regulator_bulk_data *s = msm_host->supplies;
  254. const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
  255. int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
  256. int i, ret;
  257. for (i = 0; i < num; i++)
  258. s[i].supply = regs[i].name;
  259. ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
  260. if (ret < 0) {
  261. pr_err("%s: failed to init regulator, ret=%d\n",
  262. __func__, ret);
  263. return ret;
  264. }
  265. for (i = 0; i < num; i++) {
  266. if (regulator_can_change_voltage(s[i].consumer)) {
  267. ret = regulator_set_voltage(s[i].consumer,
  268. regs[i].min_voltage, regs[i].max_voltage);
  269. if (ret < 0) {
  270. pr_err("regulator %d set voltage failed, %d\n",
  271. i, ret);
  272. return ret;
  273. }
  274. }
  275. }
  276. return 0;
  277. }
/*
 * dsi_clk_init() - look up every clock this host needs.
 *
 * Fetches the per-config list of bus clocks plus the link clocks
 * (byte/pixel/esc) and their parents. DSI v2 hosts additionally need
 * src_clk and the parents used to derive the escape clock.
 *
 * Clocks come from devm_clk_get() or clk_get_parent() (no extra reference
 * taken), so no explicit cleanup is done on failure.
 *
 * Return: 0 on success, negative errno for the first missing clock.
 */
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
	struct device *dev = &msm_host->pdev->dev;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	const struct msm_dsi_config *cfg = cfg_hnd->cfg;
	int i, ret = 0;

	/* get bus clocks */
	for (i = 0; i < cfg->num_bus_clks; i++) {
		msm_host->bus_clks[i] = devm_clk_get(dev,
						cfg->bus_clk_names[i]);
		if (IS_ERR(msm_host->bus_clks[i])) {
			ret = PTR_ERR(msm_host->bus_clks[i]);
			pr_err("%s: Unable to get %s, ret = %d\n",
				__func__, cfg->bus_clk_names[i], ret);
			goto exit;
		}
	}

	/* get link and source clocks */
	msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
	if (IS_ERR(msm_host->byte_clk)) {
		ret = PTR_ERR(msm_host->byte_clk);
		pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
			__func__, ret);
		msm_host->byte_clk = NULL;
		goto exit;
	}

	msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
	if (IS_ERR(msm_host->pixel_clk)) {
		ret = PTR_ERR(msm_host->pixel_clk);
		pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
			__func__, ret);
		msm_host->pixel_clk = NULL;
		goto exit;
	}

	msm_host->esc_clk = devm_clk_get(dev, "core_clk");
	if (IS_ERR(msm_host->esc_clk)) {
		ret = PTR_ERR(msm_host->esc_clk);
		pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
			__func__, ret);
		msm_host->esc_clk = NULL;
		goto exit;
	}

	msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
	if (!msm_host->byte_clk_src) {
		ret = -ENODEV;
		pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
		goto exit;
	}

	msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
	if (!msm_host->pixel_clk_src) {
		ret = -ENODEV;
		pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
		goto exit;
	}

	/* DSI v2 has an extra source clock tree for the escape divider */
	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
		msm_host->src_clk = devm_clk_get(dev, "src_clk");
		if (IS_ERR(msm_host->src_clk)) {
			ret = PTR_ERR(msm_host->src_clk);
			pr_err("%s: can't find dsi_src_clk. ret=%d\n",
				__func__, ret);
			msm_host->src_clk = NULL;
			goto exit;
		}

		msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
		if (!msm_host->esc_clk_src) {
			ret = -ENODEV;
			pr_err("%s: can't get esc_clk_src. ret=%d\n",
				__func__, ret);
			goto exit;
		}

		msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
		if (!msm_host->dsi_clk_src) {
			ret = -ENODEV;
			pr_err("%s: can't get dsi_clk_src. ret=%d\n",
				__func__, ret);
		}
	}
exit:
	return ret;
}
  358. static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
  359. {
  360. const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
  361. int i, ret;
  362. DBG("id=%d", msm_host->id);
  363. for (i = 0; i < cfg->num_bus_clks; i++) {
  364. ret = clk_prepare_enable(msm_host->bus_clks[i]);
  365. if (ret) {
  366. pr_err("%s: failed to enable bus clock %d ret %d\n",
  367. __func__, i, ret);
  368. goto err;
  369. }
  370. }
  371. return 0;
  372. err:
  373. for (; i > 0; i--)
  374. clk_disable_unprepare(msm_host->bus_clks[i]);
  375. return ret;
  376. }
  377. static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
  378. {
  379. const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
  380. int i;
  381. DBG("");
  382. for (i = cfg->num_bus_clks - 1; i >= 0; i--)
  383. clk_disable_unprepare(msm_host->bus_clks[i]);
  384. }
/*
 * dsi_link_clk_enable_6g() - set rates and enable link clocks on DSI 6G.
 *
 * Rates are programmed first (byte from byte_clk_rate, pixel from the
 * current mode's pixel clock), then the clocks are enabled in the order
 * esc -> byte -> pixel and unwound in reverse on failure.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	/* mode->clock is in kHz */
	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

/* unwind in reverse enable order */
pixel_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
error:
	return ret;
}
/*
 * dsi_link_clk_enable_v2() - set rates and enable link clocks on DSI v2.
 *
 * v2 hosts additionally carry src_clk and an explicitly-programmed esc
 * clock rate (derived in dsi_calc_clk_rate()). Rates are set first, then
 * clocks are enabled in the order byte -> esc -> src -> pixel and unwound
 * in reverse on failure.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate,
		msm_host->esc_clk_rate, msm_host->src_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
		goto error;
	}

	/* mode->clock is in kHz */
	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto esc_clk_err;
	}

	ret = clk_prepare_enable(msm_host->src_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi src clk\n", __func__);
		goto src_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

/* unwind in reverse enable order */
pixel_clk_err:
	clk_disable_unprepare(msm_host->src_clk);
src_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
esc_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
error:
	return ret;
}
  479. static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
  480. {
  481. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  482. if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
  483. return dsi_link_clk_enable_6g(msm_host);
  484. else
  485. return dsi_link_clk_enable_v2(msm_host);
  486. }
/*
 * dsi_link_clk_disable() - disable the link clocks enabled by
 * dsi_link_clk_enable_6g()/_v2(), in reverse order for each family.
 */
static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		clk_disable_unprepare(msm_host->esc_clk);
		clk_disable_unprepare(msm_host->pixel_clk);
		clk_disable_unprepare(msm_host->byte_clk);
	} else {
		/* DSI v2 additionally carries src_clk */
		clk_disable_unprepare(msm_host->pixel_clk);
		clk_disable_unprepare(msm_host->src_clk);
		clk_disable_unprepare(msm_host->esc_clk);
		clk_disable_unprepare(msm_host->byte_clk);
	}
}
  501. static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
  502. {
  503. int ret = 0;
  504. mutex_lock(&msm_host->clk_mutex);
  505. if (enable) {
  506. ret = dsi_bus_clk_enable(msm_host);
  507. if (ret) {
  508. pr_err("%s: Can not enable bus clk, %d\n",
  509. __func__, ret);
  510. goto unlock_ret;
  511. }
  512. ret = dsi_link_clk_enable(msm_host);
  513. if (ret) {
  514. pr_err("%s: Can not enable link clk, %d\n",
  515. __func__, ret);
  516. dsi_bus_clk_disable(msm_host);
  517. goto unlock_ret;
  518. }
  519. } else {
  520. dsi_link_clk_disable(msm_host);
  521. dsi_bus_clk_disable(msm_host);
  522. }
  523. unlock_ret:
  524. mutex_unlock(&msm_host->clk_mutex);
  525. return ret;
  526. }
/*
 * dsi_calc_clk_rate() - derive link clock rates from the current mode.
 *
 * Computes byte_clk_rate from the pixel clock, bpp and lane count; for
 * DSI v2 it also derives src_clk_rate and searches for an escape clock
 * frequency reachable through the byte-clock divider.
 *
 * NOTE(review): pclk_rate * bpp is 32-bit arithmetic; for very high pixel
 * clocks this product could overflow u32 — confirm the supported upper
 * bound on mode->clock.
 *
 * Return: 0 on success, -EINVAL if no mode is set or no valid escape
 * divider exists.
 */
static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	u8 lanes = msm_host->lanes;
	u32 bpp = dsi_get_bpp(msm_host->format);
	u32 pclk_rate;

	if (!mode) {
		pr_err("%s: mode not set\n", __func__);
		return -EINVAL;
	}

	/* mode->clock is in kHz */
	pclk_rate = mode->clock * 1000;
	if (lanes > 0) {
		msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
	} else {
		pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
		msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
	}

	DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);

	msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
		unsigned int esc_mhz, esc_div;
		unsigned long byte_mhz;

		msm_host->src_clk_rate = (pclk_rate * bpp) / 8;

		/*
		 * esc clock is byte clock followed by a 4 bit divider,
		 * we need to find an escape clock frequency within the
		 * mipi DSI spec range within the maximum divider limit
		 * We iterate here between an escape clock frequencey
		 * between 20 Mhz to 5 Mhz and pick up the first one
		 * that can be supported by our divider
		 */
		byte_mhz = msm_host->byte_clk_rate / 1000000;

		for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
			esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);

			/*
			 * TODO: Ideally, we shouldn't know what sort of divider
			 * is available in mmss_cc, we're just assuming that
			 * it'll always be a 4 bit divider. Need to come up with
			 * a better way here.
			 */
			if (esc_div >= 1 && esc_div <= 16)
				break;
		}

		/* loop exhausted without finding a representable divider */
		if (esc_mhz < 5)
			return -EINVAL;

		msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;

		DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
			msm_host->src_clk_rate);
	}

	return 0;
}
/*
 * dsi_phy_sw_reset() - pulse the PHY software reset bit.
 *
 * Asserts RESET, barriers and waits 1 ms for it to latch, then deasserts
 * and allows 100 us before the PHY is used again.
 */
static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
{
	DBG("");
	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
	/* Make sure fully reset */
	wmb();
	udelay(1000);
	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
	udelay(100);
}
/*
 * dsi_intr_ctrl() - set or clear bits in the interrupt control register.
 * @mask:   interrupt bits to modify
 * @enable: nonzero to set @mask, zero to clear it
 *
 * Read-modify-write protected by intr_lock so concurrent updates to the
 * register cannot be lost.
 */
static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
{
	u32 intr;
	unsigned long flags;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);

	if (enable)
		intr |= mask;
	else
		intr &= ~mask;

	DBG("intr=%x enable=%d", intr, enable);

	dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
}
  603. static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
  604. {
  605. if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
  606. return BURST_MODE;
  607. else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
  608. return NON_BURST_SYNCH_PULSE;
  609. return NON_BURST_SYNCH_EVENT;
  610. }
  611. static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
  612. const enum mipi_dsi_pixel_format mipi_fmt)
  613. {
  614. switch (mipi_fmt) {
  615. case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
  616. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
  617. case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
  618. case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
  619. default: return VID_DST_FORMAT_RGB888;
  620. }
  621. }
  622. static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
  623. const enum mipi_dsi_pixel_format mipi_fmt)
  624. {
  625. switch (mipi_fmt) {
  626. case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
  627. case MIPI_DSI_FMT_RGB666_PACKED:
  628. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
  629. case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
  630. default: return CMD_DST_FORMAT_RGB888;
  631. }
  632. }
  633. static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
  634. u32 clk_pre, u32 clk_post)
  635. {
  636. u32 flags = msm_host->mode_flags;
  637. enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
  638. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  639. u32 data = 0;
  640. if (!enable) {
  641. dsi_write(msm_host, REG_DSI_CTRL, 0);
  642. return;
  643. }
  644. if (flags & MIPI_DSI_MODE_VIDEO) {
  645. if (flags & MIPI_DSI_MODE_VIDEO_HSE)
  646. data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
  647. if (flags & MIPI_DSI_MODE_VIDEO_HFP)
  648. data |= DSI_VID_CFG0_HFP_POWER_STOP;
  649. if (flags & MIPI_DSI_MODE_VIDEO_HBP)
  650. data |= DSI_VID_CFG0_HBP_POWER_STOP;
  651. if (flags & MIPI_DSI_MODE_VIDEO_HSA)
  652. data |= DSI_VID_CFG0_HSA_POWER_STOP;
  653. /* Always set low power stop mode for BLLP
  654. * to let command engine send packets
  655. */
  656. data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
  657. DSI_VID_CFG0_BLLP_POWER_STOP;
  658. data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
  659. data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
  660. data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
  661. dsi_write(msm_host, REG_DSI_VID_CFG0, data);
  662. /* Do not swap RGB colors */
  663. data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
  664. dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
  665. } else {
  666. /* Do not swap RGB colors */
  667. data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
  668. data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
  669. dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
  670. data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
  671. DSI_CMD_CFG1_WR_MEM_CONTINUE(
  672. MIPI_DCS_WRITE_MEMORY_CONTINUE);
  673. /* Always insert DCS command */
  674. data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
  675. dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
  676. }
  677. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
  678. DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
  679. DSI_CMD_DMA_CTRL_LOW_POWER);
  680. data = 0;
  681. /* Always assume dedicated TE pin */
  682. data |= DSI_TRIG_CTRL_TE;
  683. data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
  684. data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
  685. data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
  686. if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
  687. (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
  688. data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
  689. dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
  690. data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
  691. DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
  692. dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
  693. data = 0;
  694. if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
  695. data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
  696. dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
  697. /* allow only ack-err-status to generate interrupt */
  698. dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
  699. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
  700. dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
  701. data = DSI_CTRL_CLK_EN;
  702. DBG("lane number=%d", msm_host->lanes);
  703. if (msm_host->lanes == 2) {
  704. data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
  705. /* swap lanes for 2-lane panel for better performance */
  706. dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
  707. DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
  708. } else {
  709. /* Take 4 lanes as default */
  710. data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
  711. DSI_CTRL_LANE3;
  712. /* Do not swap lanes for 4-lane panel */
  713. dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
  714. DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
  715. }
  716. if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
  717. dsi_write(msm_host, REG_DSI_LANE_CTRL,
  718. DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
  719. data |= DSI_CTRL_ENABLE;
  720. dsi_write(msm_host, REG_DSI_CTRL, data);
  721. }
/*
 * Program the controller's display timing registers from the current
 * drm_display_mode.  Video mode gets the full active/total/sync
 * geometry; command mode only needs the MDP stream word count and
 * frame dimensions.
 */
static void dsi_timing_setup(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
	u32 h_total = mode->htotal;
	u32 v_total = mode->vtotal;
	/* sync pulse widths */
	u32 hs_end = mode->hsync_end - mode->hsync_start;
	u32 vs_end = mode->vsync_end - mode->vsync_start;
	/* active region boundaries, measured from sync start */
	u32 ha_start = h_total - mode->hsync_start;
	u32 ha_end = ha_start + mode->hdisplay;
	u32 va_start = v_total - mode->vsync_start;
	u32 va_end = va_start + mode->vdisplay;
	u32 wc;

	DBG("");

	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
		dsi_write(msm_host, REG_DSI_ACTIVE_H,
			DSI_ACTIVE_H_START(ha_start) |
			DSI_ACTIVE_H_END(ha_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_V,
			DSI_ACTIVE_V_START(va_start) |
			DSI_ACTIVE_V_END(va_end));
		/* TOTAL registers are programmed as (count - 1) */
		dsi_write(msm_host, REG_DSI_TOTAL,
			DSI_TOTAL_H_TOTAL(h_total - 1) |
			DSI_TOTAL_V_TOTAL(v_total - 1));

		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
			DSI_ACTIVE_HSYNC_START(hs_start) |
			DSI_ACTIVE_HSYNC_END(hs_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
	} else {		/* command mode */
		/* image data and 1 byte write_memory_start cmd */
		wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
					msm_host->channel) |
			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
					MIPI_DSI_DCS_LONG_WRITE));

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
	}
}
/*
 * Soft-reset the DSI controller.  The core clocks are force-enabled
 * first because the reset logic is clocked; the wmb()s keep the
 * register writes in program order on the bus.
 */
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* clocks need to be enabled before reset */

	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb(); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
}
  775. static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
  776. bool video_mode, bool enable)
  777. {
  778. u32 dsi_ctrl;
  779. dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
  780. if (!enable) {
  781. dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
  782. DSI_CTRL_CMD_MODE_EN);
  783. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
  784. DSI_IRQ_MASK_VIDEO_DONE, 0);
  785. } else {
  786. if (video_mode) {
  787. dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
  788. } else { /* command mode */
  789. dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
  790. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
  791. }
  792. dsi_ctrl |= DSI_CTRL_ENABLE;
  793. }
  794. dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
  795. }
  796. static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
  797. {
  798. u32 data;
  799. data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
  800. if (mode == 0)
  801. data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
  802. else
  803. data |= DSI_CMD_DMA_CTRL_LOW_POWER;
  804. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
  805. }
/*
 * Block until the controller signals the end of the current video
 * frame, or a 70 ms timeout expires (the timeout result is
 * deliberately ignored — this is a best-effort wait).  The VIDEO_DONE
 * interrupt is enabled only for the duration of the wait.
 */
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);

	reinit_completion(&msm_host->video_comp);

	wait_for_completion_timeout(&msm_host->video_comp,
			msecs_to_jiffies(70));

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
  814. static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
  815. {
  816. if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
  817. return;
  818. if (msm_host->power_on) {
  819. dsi_wait4video_done(msm_host);
  820. /* delay 4 ms to skip BLLP */
  821. usleep_range(2000, 4000);
  822. }
  823. }
  824. /* dsi_cmd */
  825. static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
  826. {
  827. struct drm_device *dev = msm_host->dev;
  828. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  829. int ret;
  830. u32 iova;
  831. if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
  832. mutex_lock(&dev->struct_mutex);
  833. msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
  834. if (IS_ERR(msm_host->tx_gem_obj)) {
  835. ret = PTR_ERR(msm_host->tx_gem_obj);
  836. pr_err("%s: failed to allocate gem, %d\n",
  837. __func__, ret);
  838. msm_host->tx_gem_obj = NULL;
  839. mutex_unlock(&dev->struct_mutex);
  840. return ret;
  841. }
  842. ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
  843. if (ret) {
  844. pr_err("%s: failed to get iova, %d\n", __func__, ret);
  845. return ret;
  846. }
  847. mutex_unlock(&dev->struct_mutex);
  848. if (iova & 0x07) {
  849. pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
  850. return -EINVAL;
  851. }
  852. msm_host->tx_size = msm_host->tx_gem_obj->size;
  853. } else {
  854. msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
  855. &msm_host->tx_buf_paddr, GFP_KERNEL);
  856. if (!msm_host->tx_buf) {
  857. ret = -ENOMEM;
  858. pr_err("%s: failed to allocate tx buf, %d\n",
  859. __func__, ret);
  860. return ret;
  861. }
  862. msm_host->tx_size = size;
  863. }
  864. return 0;
  865. }
/*
 * Release the command TX buffer allocated by dsi_tx_buf_alloc():
 * the GEM object (DSI6G) and/or the coherent DMA buffer (older hosts).
 * Safe to call when neither was allocated.
 */
static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
{
	struct drm_device *dev = msm_host->dev;

	if (msm_host->tx_gem_obj) {
		msm_gem_put_iova(msm_host->tx_gem_obj, 0);
		mutex_lock(&dev->struct_mutex);
		msm_gem_free_object(msm_host->tx_gem_obj);
		msm_host->tx_gem_obj = NULL;
		mutex_unlock(&dev->struct_mutex);
	}

	if (msm_host->tx_buf)
		dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
			msm_host->tx_buf_paddr);
}
/*
 * Prepare a command buffer to be transferred by DMA: pack @msg into
 * the host's TX buffer using the MSM-specific command layout (header
 * bytes reordered, flags in byte 3, payload from byte 4, padded to a
 * 4-byte boundary with 0xff).
 *
 * Returns the 4-byte-aligned number of bytes to transfer on success,
 * or a negative errno on failure.
 */
static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
				const struct mipi_dsi_msg *msg)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	struct mipi_dsi_packet packet;
	int len;
	int ret;
	u8 *data;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret) {
		pr_err("%s: create packet failed, %d\n", __func__, ret);
		return ret;
	}

	/* round up to a multiple of 4 bytes for the DMA engine */
	len = (packet.size + 3) & (~0x3);
	if (len > msm_host->tx_size) {
		pr_err("%s: packet size is too big\n", __func__);
		return -EINVAL;
	}

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		data = msm_gem_vaddr(msm_host->tx_gem_obj);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			pr_err("%s: get vaddr failed, %d\n", __func__, ret);
			return ret;
		}
	} else {
		data = msm_host->tx_buf;
	}

	/* MSM specific command format in memory */
	data[0] = packet.header[1];
	data[1] = packet.header[2];
	data[2] = packet.header[0];
	data[3] = BIT(7); /* Last packet */
	if (mipi_dsi_packet_format_is_long(msg->type))
		data[3] |= BIT(6);
	if (msg->rx_buf && msg->rx_len)
		data[3] |= BIT(5);

	/* Long packet */
	if (packet.payload && packet.payload_length)
		memcpy(data + 4, packet.payload, packet.payload_length);

	/* Append 0xff to the end */
	if (packet.size < len)
		memset(data + packet.size, 0xff, len - packet.size);

	return len;
}
  928. /*
  929. * dsi_short_read1_resp: 1 parameter
  930. */
  931. static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  932. {
  933. u8 *data = msg->rx_buf;
  934. if (data && (msg->rx_len >= 1)) {
  935. *data = buf[1]; /* strip out dcs type */
  936. return 1;
  937. } else {
  938. pr_err("%s: read data does not match with rx_buf len %zu\n",
  939. __func__, msg->rx_len);
  940. return -EINVAL;
  941. }
  942. }
  943. /*
  944. * dsi_short_read2_resp: 2 parameter
  945. */
  946. static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  947. {
  948. u8 *data = msg->rx_buf;
  949. if (data && (msg->rx_len >= 2)) {
  950. data[0] = buf[1]; /* strip out dcs type */
  951. data[1] = buf[2];
  952. return 2;
  953. } else {
  954. pr_err("%s: read data does not match with rx_buf len %zu\n",
  955. __func__, msg->rx_len);
  956. return -EINVAL;
  957. }
  958. }
  959. static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  960. {
  961. /* strip out 4 byte dcs header */
  962. if (msg->rx_buf && msg->rx_len)
  963. memcpy(msg->rx_buf, buf + 4, msg->rx_len);
  964. return msg->rx_len;
  965. }
/*
 * Trigger (via the DSI manager) a DMA transfer of @len bytes from the
 * TX buffer and wait up to 200 ms for the CMD_DMA_DONE interrupt.
 *
 * Returns @len on success (or when the manager deferred the trigger),
 * -ETIMEDOUT if the completion never fired, or a negative errno if
 * the iova lookup failed.
 */
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int ret;
	u32 dma_base;
	bool triggered;

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &dma_base);
		if (ret) {
			pr_err("%s: failed to get iova: %d\n", __func__, ret);
			return ret;
		}
	} else {
		dma_base = msm_host->tx_buf_paddr;
	}

	reinit_completion(&msm_host->dma_comp);

	/* in video mode, wait so the command goes out during BLLP */
	dsi_wait4video_eng_busy(msm_host);

	triggered = msm_dsi_manager_cmd_xfer_trigger(
						msm_host->id, dma_base, len);
	if (triggered) {
		ret = wait_for_completion_timeout(&msm_host->dma_comp,
					msecs_to_jiffies(200));
		DBG("ret=%d", ret);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else
			ret = len;
	} else
		ret = len;

	return ret;
}
  997. static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
  998. u8 *buf, int rx_byte, int pkt_size)
  999. {
  1000. u32 *lp, *temp, data;
  1001. int i, j = 0, cnt;
  1002. u32 read_cnt;
  1003. u8 reg[16];
  1004. int repeated_bytes = 0;
  1005. int buf_offset = buf - msm_host->rx_buf;
  1006. lp = (u32 *)buf;
  1007. temp = (u32 *)reg;
  1008. cnt = (rx_byte + 3) >> 2;
  1009. if (cnt > 4)
  1010. cnt = 4; /* 4 x 32 bits registers only */
  1011. if (rx_byte == 4)
  1012. read_cnt = 4;
  1013. else
  1014. read_cnt = pkt_size + 6;
  1015. /*
  1016. * In case of multiple reads from the panel, after the first read, there
  1017. * is possibility that there are some bytes in the payload repeating in
  1018. * the RDBK_DATA registers. Since we read all the parameters from the
  1019. * panel right from the first byte for every pass. We need to skip the
  1020. * repeating bytes and then append the new parameters to the rx buffer.
  1021. */
  1022. if (read_cnt > 16) {
  1023. int bytes_shifted;
  1024. /* Any data more than 16 bytes will be shifted out.
  1025. * The temp read buffer should already contain these bytes.
  1026. * The remaining bytes in read buffer are the repeated bytes.
  1027. */
  1028. bytes_shifted = read_cnt - 16;
  1029. repeated_bytes = buf_offset - bytes_shifted;
  1030. }
  1031. for (i = cnt - 1; i >= 0; i--) {
  1032. data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
  1033. *temp++ = ntohl(data); /* to host byte order */
  1034. DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
  1035. }
  1036. for (i = repeated_bytes; i < 16; i++)
  1037. buf[j++] = reg[i];
  1038. return j;
  1039. }
  1040. static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
  1041. const struct mipi_dsi_msg *msg)
  1042. {
  1043. int len, ret;
  1044. int bllp_len = msm_host->mode->hdisplay *
  1045. dsi_get_bpp(msm_host->format) / 8;
  1046. len = dsi_cmd_dma_add(msm_host, msg);
  1047. if (!len) {
  1048. pr_err("%s: failed to add cmd type = 0x%x\n",
  1049. __func__, msg->type);
  1050. return -EINVAL;
  1051. }
  1052. /* for video mode, do not send cmds more than
  1053. * one pixel line, since it only transmit it
  1054. * during BLLP.
  1055. */
  1056. /* TODO: if the command is sent in LP mode, the bit rate is only
  1057. * half of esc clk rate. In this case, if the video is already
  1058. * actively streaming, we need to check more carefully if the
  1059. * command can be fit into one BLLP.
  1060. */
  1061. if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
  1062. pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
  1063. __func__, len);
  1064. return -EINVAL;
  1065. }
  1066. ret = dsi_cmd_dma_tx(msm_host, len);
  1067. if (ret < len) {
  1068. pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
  1069. __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
  1070. return -ECOMM;
  1071. }
  1072. return len;
  1073. }
/*
 * Error-recovery reset (used by dsi_err_worker() on MDP FIFO
 * underflow): disable the controller, soft-reset it with clocks
 * forced on, then restore the previously programmed DSI_CTRL value.
 * The wmb()s enforce the required register write ordering.
 */
static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
{
	u32 data0, data1;

	data0 = dsi_read(msm_host, REG_DSI_CTRL);
	data1 = data0;
	data1 &= ~DSI_CTRL_ENABLE;
	dsi_write(msm_host, REG_DSI_CTRL, data1);
	/*
	 * dsi controller need to be disabled before
	 * clocks turned on
	 */
	wmb();

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb();	/* make sure clocks enabled */

	/* dsi controller can only be reset while clocks are running */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb();	/* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb();	/* controller out of reset */
	dsi_write(msm_host, REG_DSI_CTRL, data0);
	wmb();	/* make sure dsi controller enabled again */
}
/*
 * Deferred error handler (msm_host->err_work).  Runs with the error
 * interrupt masked by dsi_error(): recovers from an MDP FIFO underflow
 * via a soft reset, clears the accumulated error state, then re-arms
 * the error interrupt.
 */
static void dsi_err_worker(struct work_struct *work)
{
	struct msm_dsi_host *msm_host =
		container_of(work, struct msm_dsi_host, err_work);
	u32 status = msm_host->err_work_state;

	pr_err_ratelimited("%s: status=%x\n", __func__, status);
	if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
		dsi_sw_reset_restore(msm_host);

	/* It is safe to clear here because error irq is disabled. */
	msm_host->err_work_state = 0;

	/* enable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
}
/*
 * Harvest and clear the peripheral ACK error status, latching the
 * event into err_work_state for dsi_err_worker().
 */
static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);

	if (status) {
		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
		/* Writing of an extra 0 needed to clear error bits */
		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
		msm_host->err_work_state |= DSI_ERR_STATE_ACK;
	}
}
/*
 * Harvest and clear the timeout error status, latching the event into
 * err_work_state.
 */
static void dsi_timeout_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
	if (status) {
		dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
	}
}
/*
 * Harvest and clear data-lane-0 PHY errors (escape-mode, sync and
 * LP contention faults), latching the event into err_work_state.
 */
static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);

	if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
			DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
	}
}
/*
 * Harvest and clear FIFO underflow/overflow status.  A command-mode
 * MDP FIFO underflow is flagged separately so the error worker can
 * perform a soft-reset recovery.
 */
static void dsi_fifo_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);

	/* fifo underflow, overflow */
	if (status) {
		dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
		if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
			msm_host->err_work_state |=
				DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
	}
}
/*
 * Harvest and clear the interleave-operation contention flag from
 * STATUS0, latching the event into err_work_state.
 */
static void dsi_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_STATUS0);

	if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
		dsi_write(msm_host, REG_DSI_STATUS0, status);
		msm_host->err_work_state |=
			DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
	}
}
/*
 * Harvest and clear a PLL-unlocked indication, latching the event into
 * err_work_state.
 */
static void dsi_clk_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_CLK_STATUS);

	if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
		dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
	}
}
/*
 * Top-half error handling, called from the ISR: mask further error
 * interrupts, sweep every error status register into err_work_state,
 * and defer recovery to dsi_err_worker() on the host's workqueue.
 */
static void dsi_error(struct msm_dsi_host *msm_host)
{
	/* disable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);

	dsi_clk_status(msm_host);
	dsi_fifo_status(msm_host);
	dsi_ack_err_status(msm_host);
	dsi_timeout_status(msm_host);
	dsi_status(msm_host);
	dsi_dln0_phy_err(msm_host);

	queue_work(msm_host->workqueue, &msm_host->err_work);
}
/*
 * DSI controller interrupt handler.  Reads and writes back INTR_CTRL
 * under intr_lock (dsi_intr_ctrl() touches the same register) to ack
 * all pending bits, then dispatches error / video-done / dma-done
 * events.
 */
static irqreturn_t dsi_host_irq(int irq, void *ptr)
{
	struct msm_dsi_host *msm_host = ptr;
	u32 isr;
	unsigned long flags;

	/* nothing to do if the register space is not mapped yet */
	if (!msm_host->ctrl_base)
		return IRQ_HANDLED;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
	/* writing the value back acknowledges the pending interrupts */
	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);

	DBG("isr=0x%x, id=%d", isr, msm_host->id);

	if (isr & DSI_IRQ_ERROR)
		dsi_error(msm_host);

	if (isr & DSI_IRQ_VIDEO_DONE)
		complete(&msm_host->video_comp);

	if (isr & DSI_IRQ_CMD_DMA_DONE)
		complete(&msm_host->dma_comp);

	return IRQ_HANDLED;
}
/*
 * Claim the optional panel GPIOs from the panel's device node: the
 * display-enable output (initially low) and the TE input.  An absent
 * GPIO is fine (the *_optional lookup yields NULL); only an actual
 * lookup error fails.
 */
static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
					struct device *panel_device)
{
	msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
							 "disp-enable",
							 GPIOD_OUT_LOW);
	if (IS_ERR(msm_host->disp_en_gpio)) {
		DBG("cannot get disp-enable-gpios %ld",
				PTR_ERR(msm_host->disp_en_gpio));
		return PTR_ERR(msm_host->disp_en_gpio);
	}

	msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
								GPIOD_IN);
	if (IS_ERR(msm_host->te_gpio)) {
		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
		return PTR_ERR(msm_host->te_gpio);
	}

	return 0;
}
/*
 * mipi_dsi_host_ops.attach: cache the peripheral's link configuration
 * (channel, lane count, pixel format, mode flags), claim the panel
 * GPIOs, and signal a hotplug event if the DRM device is already up.
 */
static int dsi_host_attach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	msm_host->channel = dsi->channel;
	msm_host->lanes = dsi->lanes;
	msm_host->format = dsi->format;
	msm_host->mode_flags = dsi->mode_flags;

	/* the attached device should match the node found in DT */
	WARN_ON(dsi->dev.of_node != msm_host->device_node);

	/* Some gpios defined in panel DT need to be controlled by host */
	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
	if (ret)
		return ret;

	DBG("id=%d", msm_host->id);
	if (msm_host->dev)
		drm_helper_hpd_irq_event(msm_host->dev);

	return 0;
}
/*
 * mipi_dsi_host_ops.detach: forget the attached device node and signal
 * a hotplug event if the DRM device is up.
 */
static int dsi_host_detach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	msm_host->device_node = NULL;

	DBG("id=%d", msm_host->id);
	if (msm_host->dev)
		drm_helper_hpd_irq_event(msm_host->dev);

	return 0;
}
/*
 * mipi_dsi_host_ops.transfer: forward a command message to the DSI
 * manager, serialized by cmd_mutex.  Fails with -EINVAL unless the
 * host is powered on and a message was supplied.
 */
static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
					const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	if (!msg || !msm_host->power_on)
		return -EINVAL;

	mutex_lock(&msm_host->cmd_mutex);
	ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
	mutex_unlock(&msm_host->cmd_mutex);

	return ret;
}
/* Host operations exposed to attached MIPI DSI peripheral drivers. */
static struct mipi_dsi_host_ops dsi_host_ops = {
	.attach = dsi_host_attach,
	.detach = dsi_host_detach,
	.transfer = dsi_host_transfer,
};
/*
 * Parse this host's DT node: the required "qcom,dsi-host-index"
 * property, and the optional output-port endpoint that leads to the
 * connected panel or bridge (cached in msm_host->device_node).
 */
static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
	struct device *dev = &msm_host->pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *endpoint, *device_node;
	int ret;

	ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
	if (ret) {
		dev_err(dev, "%s: host index not specified, ret=%d\n",
			__func__, ret);
		return ret;
	}

	/*
	 * Get the first endpoint node. In our case, dsi has one output port
	 * to which the panel is connected. Don't return an error if a port
	 * isn't defined. It's possible that there is nothing connected to
	 * the dsi output.
	 */
	endpoint = of_graph_get_next_endpoint(np, NULL);
	if (!endpoint) {
		dev_dbg(dev, "%s: no endpoint\n", __func__);
		return 0;
	}

	/* Get panel node from the output port's endpoint data */
	device_node = of_graph_get_remote_port_parent(endpoint);
	if (!device_node) {
		dev_err(dev, "%s: no valid device\n", __func__);
		of_node_put(endpoint);
		return -ENODEV;
	}

	of_node_put(endpoint);
	/* NOTE(review): this put drops the reference on device_node even
	 * though the raw pointer is cached below — confirm the node cannot
	 * be released while msm_host->device_node is still in use.
	 */
	of_node_put(device_node);

	msm_host->device_node = device_node;

	return 0;
}
  1306. int msm_dsi_host_init(struct msm_dsi *msm_dsi)
  1307. {
  1308. struct msm_dsi_host *msm_host = NULL;
  1309. struct platform_device *pdev = msm_dsi->pdev;
  1310. int ret;
  1311. msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
  1312. if (!msm_host) {
  1313. pr_err("%s: FAILED: cannot alloc dsi host\n",
  1314. __func__);
  1315. ret = -ENOMEM;
  1316. goto fail;
  1317. }
  1318. msm_host->pdev = pdev;
  1319. ret = dsi_host_parse_dt(msm_host);
  1320. if (ret) {
  1321. pr_err("%s: failed to parse dt\n", __func__);
  1322. goto fail;
  1323. }
  1324. msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
  1325. if (IS_ERR(msm_host->ctrl_base)) {
  1326. pr_err("%s: unable to map Dsi ctrl base\n", __func__);
  1327. ret = PTR_ERR(msm_host->ctrl_base);
  1328. goto fail;
  1329. }
  1330. msm_host->cfg_hnd = dsi_get_config(msm_host);
  1331. if (!msm_host->cfg_hnd) {
  1332. ret = -EINVAL;
  1333. pr_err("%s: get config failed\n", __func__);
  1334. goto fail;
  1335. }
  1336. /* fixup base address by io offset */
  1337. msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
  1338. ret = dsi_regulator_init(msm_host);
  1339. if (ret) {
  1340. pr_err("%s: regulator init failed\n", __func__);
  1341. goto fail;
  1342. }
  1343. ret = dsi_clk_init(msm_host);
  1344. if (ret) {
  1345. pr_err("%s: unable to initialize dsi clks\n", __func__);
  1346. goto fail;
  1347. }
  1348. msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
  1349. if (!msm_host->rx_buf) {
  1350. pr_err("%s: alloc rx temp buf failed\n", __func__);
  1351. goto fail;
  1352. }
  1353. init_completion(&msm_host->dma_comp);
  1354. init_completion(&msm_host->video_comp);
  1355. mutex_init(&msm_host->dev_mutex);
  1356. mutex_init(&msm_host->cmd_mutex);
  1357. mutex_init(&msm_host->clk_mutex);
  1358. spin_lock_init(&msm_host->intr_lock);
  1359. /* setup workqueue */
  1360. msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
  1361. INIT_WORK(&msm_host->err_work, dsi_err_worker);
  1362. msm_dsi->host = &msm_host->base;
  1363. msm_dsi->id = msm_host->id;
  1364. DBG("Dsi Host %d initialized", msm_host->id);
  1365. return 0;
  1366. fail:
  1367. return ret;
  1368. }
/*
 * Tear down what msm_dsi_host_init()/msm_dsi_host_modeset_init()
 * created: the command TX buffer, the error workqueue, and the
 * mutexes.  devm-managed allocations are presumably released by the
 * driver core — confirm against the probe/remove path.
 */
void msm_dsi_host_destroy(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	DBG("");
	dsi_tx_buf_free(msm_host);
	if (msm_host->workqueue) {
		flush_workqueue(msm_host->workqueue);
		destroy_workqueue(msm_host->workqueue);
		msm_host->workqueue = NULL;
	}

	mutex_destroy(&msm_host->clk_mutex);
	mutex_destroy(&msm_host->cmd_mutex);
	mutex_destroy(&msm_host->dev_mutex);
}
  1383. int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
  1384. struct drm_device *dev)
  1385. {
  1386. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1387. struct platform_device *pdev = msm_host->pdev;
  1388. int ret;
  1389. msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
  1390. if (msm_host->irq < 0) {
  1391. ret = msm_host->irq;
  1392. dev_err(dev->dev, "failed to get irq: %d\n", ret);
  1393. return ret;
  1394. }
  1395. ret = devm_request_irq(&pdev->dev, msm_host->irq,
  1396. dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
  1397. "dsi_isr", msm_host);
  1398. if (ret < 0) {
  1399. dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
  1400. msm_host->irq, ret);
  1401. return ret;
  1402. }
  1403. msm_host->dev = dev;
  1404. ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
  1405. if (ret) {
  1406. pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
  1407. return ret;
  1408. }
  1409. return 0;
  1410. }
/*
 * Register this host with the MIPI DSI core (idempotent).  With
 * @check_defer set and a device node present, return -EPROBE_DEFER
 * until the node's panel or bridge driver has probed, so the
 * connector reports a valid state by the time fbcon scans it.
 */
int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	/* Register mipi dsi host */
	if (!msm_host->registered) {
		host->dev = &msm_host->pdev->dev;
		host->ops = &dsi_host_ops;
		ret = mipi_dsi_host_register(host);
		if (ret)
			return ret;

		msm_host->registered = true;

		/* If the panel driver has not been probed after host register,
		 * we should defer the host's probe.
		 * It makes sure panel is connected when fbcon detects
		 * connector status and gets the proper display mode to
		 * create framebuffer.
		 * Don't try to defer if there is nothing connected to the dsi
		 * output
		 */
		if (check_defer && msm_host->device_node) {
			if (!of_drm_find_panel(msm_host->device_node))
				if (!of_drm_find_bridge(msm_host->device_node))
					return -EPROBE_DEFER;
		}
	}

	return 0;
}
  1439. void msm_dsi_host_unregister(struct mipi_dsi_host *host)
  1440. {
  1441. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1442. if (msm_host->registered) {
  1443. mipi_dsi_host_unregister(host);
  1444. host->dev = NULL;
  1445. host->ops = NULL;
  1446. msm_host->registered = false;
  1447. }
  1448. }
/*
 * Prepare the controller for a command transfer: enable clocks, pick
 * HS vs LP for the DMA, save DSI_CTRL and force the command engine on,
 * then arm the CMD_DMA_DONE interrupt.
 * Paired with msm_dsi_host_xfer_restore().
 */
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	/* TODO: make sure dsi_cmd_mdp is idle.
	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
	 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
	 * How to handle the old versions? Wait for mdp cmd done?
	 */

	/*
	 * mdss interrupt is generated in mdp core clock domain
	 * mdp clock need to be enabled to receive dsi interrupt
	 */
	dsi_clk_ctrl(msm_host, 1);

	/* TODO: vote for bus bandwidth */

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(0, msm_host);

	/* save DSI_CTRL so the restore path can put it back verbatim */
	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
	dsi_write(msm_host, REG_DSI_CTRL,
		msm_host->dma_cmd_ctrl_restore |
		DSI_CTRL_CMD_MODE_EN |
		DSI_CTRL_ENABLE);
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);

	return 0;
}
/*
 * Undo msm_dsi_host_xfer_prepare(): disarm CMD_DMA_DONE, restore the
 * saved DSI_CTRL value, return to LP power mode if the transfer used
 * HS, and drop the clock vote.
 */
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(1, msm_host);

	/* TODO: unvote for bus bandwidth */

	dsi_clk_ctrl(msm_host, 0);
}
/*
 * Transmit one command message on this host; thin wrapper around
 * dsi_cmds2buf_tx().  Returns bytes sent or a negative errno.
 */
int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	return dsi_cmds2buf_tx(msm_host, msg);
}
  1491. int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
  1492. const struct mipi_dsi_msg *msg)
  1493. {
  1494. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1495. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  1496. int data_byte, rx_byte, dlen, end;
  1497. int short_response, diff, pkt_size, ret = 0;
  1498. char cmd;
  1499. int rlen = msg->rx_len;
  1500. u8 *buf;
  1501. if (rlen <= 2) {
  1502. short_response = 1;
  1503. pkt_size = rlen;
  1504. rx_byte = 4;
  1505. } else {
  1506. short_response = 0;
  1507. data_byte = 10; /* first read */
  1508. if (rlen < data_byte)
  1509. pkt_size = rlen;
  1510. else
  1511. pkt_size = data_byte;
  1512. rx_byte = data_byte + 6; /* 4 header + 2 crc */
  1513. }
  1514. buf = msm_host->rx_buf;
  1515. end = 0;
  1516. while (!end) {
  1517. u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
  1518. struct mipi_dsi_msg max_pkt_size_msg = {
  1519. .channel = msg->channel,
  1520. .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
  1521. .tx_len = 2,
  1522. .tx_buf = tx,
  1523. };
  1524. DBG("rlen=%d pkt_size=%d rx_byte=%d",
  1525. rlen, pkt_size, rx_byte);
  1526. ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
  1527. if (ret < 2) {
  1528. pr_err("%s: Set max pkt size failed, %d\n",
  1529. __func__, ret);
  1530. return -EINVAL;
  1531. }
  1532. if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
  1533. (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
  1534. /* Clear the RDBK_DATA registers */
  1535. dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
  1536. DSI_RDBK_DATA_CTRL_CLR);
  1537. wmb(); /* make sure the RDBK registers are cleared */
  1538. dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
  1539. wmb(); /* release cleared status before transfer */
  1540. }
  1541. ret = dsi_cmds2buf_tx(msm_host, msg);
  1542. if (ret < msg->tx_len) {
  1543. pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
  1544. return ret;
  1545. }
  1546. /*
  1547. * once cmd_dma_done interrupt received,
  1548. * return data from client is ready and stored
  1549. * at RDBK_DATA register already
  1550. * since rx fifo is 16 bytes, dcs header is kept at first loop,
  1551. * after that dcs header lost during shift into registers
  1552. */
  1553. dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
  1554. if (dlen <= 0)
  1555. return 0;
  1556. if (short_response)
  1557. break;
  1558. if (rlen <= data_byte) {
  1559. diff = data_byte - rlen;
  1560. end = 1;
  1561. } else {
  1562. diff = 0;
  1563. rlen -= data_byte;
  1564. }
  1565. if (!end) {
  1566. dlen -= 2; /* 2 crc */
  1567. dlen -= diff;
  1568. buf += dlen; /* next start position */
  1569. data_byte = 14; /* NOT first read */
  1570. if (rlen < data_byte)
  1571. pkt_size += rlen;
  1572. else
  1573. pkt_size += data_byte;
  1574. DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
  1575. }
  1576. }
  1577. /*
  1578. * For single Long read, if the requested rlen < 10,
  1579. * we need to shift the start position of rx
  1580. * data buffer to skip the bytes which are not
  1581. * updated.
  1582. */
  1583. if (pkt_size < 10 && !short_response)
  1584. buf = msm_host->rx_buf + (10 - rlen);
  1585. else
  1586. buf = msm_host->rx_buf;
  1587. cmd = buf[0];
  1588. switch (cmd) {
  1589. case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
  1590. pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
  1591. ret = 0;
  1592. break;
  1593. case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
  1594. case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
  1595. ret = dsi_short_read1_resp(buf, msg);
  1596. break;
  1597. case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
  1598. case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
  1599. ret = dsi_short_read2_resp(buf, msg);
  1600. break;
  1601. case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
  1602. case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
  1603. ret = dsi_long_read_resp(buf, msg);
  1604. break;
  1605. default:
  1606. pr_warn("%s:Invalid response cmd\n", __func__);
  1607. ret = 0;
  1608. }
  1609. return ret;
  1610. }
/*
 * msm_dsi_host_cmd_xfer_commit() - kick off a previously prepared command
 * DMA transfer.
 *
 * @dma_base: bus address of the command buffer to transmit.
 * @len: length of the command buffer in bytes.
 *
 * Register write order matters: base and length must be programmed
 * before the trigger write that starts the transfer.
 */
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
					u32 len)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
	/* Make sure trigger happens */
	wmb();
}
  1621. int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
  1622. struct msm_dsi_pll *src_pll)
  1623. {
  1624. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1625. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  1626. struct clk *byte_clk_provider, *pixel_clk_provider;
  1627. int ret;
  1628. ret = msm_dsi_pll_get_clk_provider(src_pll,
  1629. &byte_clk_provider, &pixel_clk_provider);
  1630. if (ret) {
  1631. pr_info("%s: can't get provider from pll, don't set parent\n",
  1632. __func__);
  1633. return 0;
  1634. }
  1635. ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
  1636. if (ret) {
  1637. pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
  1638. __func__, ret);
  1639. goto exit;
  1640. }
  1641. ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
  1642. if (ret) {
  1643. pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
  1644. __func__, ret);
  1645. goto exit;
  1646. }
  1647. if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
  1648. ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
  1649. if (ret) {
  1650. pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
  1651. __func__, ret);
  1652. goto exit;
  1653. }
  1654. ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
  1655. if (ret) {
  1656. pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
  1657. __func__, ret);
  1658. goto exit;
  1659. }
  1660. }
  1661. exit:
  1662. return ret;
  1663. }
  1664. int msm_dsi_host_enable(struct mipi_dsi_host *host)
  1665. {
  1666. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1667. dsi_op_mode_config(msm_host,
  1668. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
  1669. /* TODO: clock should be turned off for command mode,
  1670. * and only turned on before MDP START.
  1671. * This part of code should be enabled once mdp driver support it.
  1672. */
  1673. /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
  1674. dsi_clk_ctrl(msm_host, 0); */
  1675. return 0;
  1676. }
  1677. int msm_dsi_host_disable(struct mipi_dsi_host *host)
  1678. {
  1679. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1680. dsi_op_mode_config(msm_host,
  1681. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
  1682. /* Since we have disabled INTF, the video engine won't stop so that
  1683. * the cmd engine will be blocked.
  1684. * Reset to disable video engine so that we can send off cmd.
  1685. */
  1686. dsi_sw_reset(msm_host);
  1687. return 0;
  1688. }
/*
 * msm_dsi_host_power_on() - bring the DSI host fully up.
 *
 * Ordered power-up sequence under dev_mutex: clock-rate calculation,
 * regulators, PHY enable (with bus clocks held only for that step),
 * link clocks, pinctrl, timing/ctrl programming, then the optional
 * display-enable GPIO. Idempotent: returns 0 immediately if already on.
 * On failure, unwinds whatever was enabled via the goto ladder.
 */
int msm_dsi_host_power_on(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	u32 clk_pre = 0, clk_post = 0;
	int ret = 0;

	mutex_lock(&msm_host->dev_mutex);
	if (msm_host->power_on) {
		DBG("dsi host already on");
		goto unlock_ret;
	}

	/* Derive byte/pixel/esc clock rates from the current mode. */
	ret = dsi_calc_clk_rate(msm_host);
	if (ret) {
		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
		goto unlock_ret;
	}

	ret = dsi_host_regulator_enable(msm_host);
	if (ret) {
		pr_err("%s:Failed to enable vregs.ret=%d\n",
			__func__, ret);
		goto unlock_ret;
	}

	ret = dsi_bus_clk_enable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
		goto fail_disable_reg;
	}

	dsi_phy_sw_reset(msm_host);
	/*
	 * PHY enable also reports the clk_pre/clk_post timing values used
	 * below in dsi_ctrl_config(). Bus clocks are held only across this
	 * call — presumably they are needed just for PHY register access;
	 * TODO(review): confirm against msm_dsi_manager_phy_enable().
	 */
	ret = msm_dsi_manager_phy_enable(msm_host->id,
					msm_host->byte_clk_rate * 8,
					msm_host->esc_clk_rate,
					&clk_pre, &clk_post);
	dsi_bus_clk_disable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable phy, %d\n", __func__, ret);
		goto fail_disable_reg;
	}

	/* Turn on the link clocks now that the PHY is running. */
	ret = dsi_clk_ctrl(msm_host, 1);
	if (ret) {
		pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
		goto fail_disable_reg;
	}

	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
	if (ret) {
		pr_err("%s: failed to set pinctrl default state, %d\n",
			__func__, ret);
		goto fail_disable_clk;
	}

	/* Program timings and controller config, then reset the engine. */
	dsi_timing_setup(msm_host);
	dsi_sw_reset(msm_host);
	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);

	/* Optional panel-enable GPIO, if one was provided in DT. */
	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 1);

	msm_host->power_on = true;
	mutex_unlock(&msm_host->dev_mutex);

	return 0;

fail_disable_clk:
	dsi_clk_ctrl(msm_host, 0);
fail_disable_reg:
	dsi_host_regulator_disable(msm_host);
unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return ret;
}
/*
 * msm_dsi_host_power_off() - tear the DSI host down, reversing
 * msm_dsi_host_power_on(): controller config off, enable-GPIO low,
 * pinctrl sleep state, PHY off, link clocks off, regulators off.
 * Idempotent: a no-op (returning 0) if the host is already off.
 * Always returns 0.
 */
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	mutex_lock(&msm_host->dev_mutex);
	if (!msm_host->power_on) {
		DBG("dsi host already off");
		goto unlock_ret;
	}

	dsi_ctrl_config(msm_host, false, 0, 0);

	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 0);

	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);

	/* PHY must go down before its source clocks and regulators. */
	msm_dsi_manager_phy_disable(msm_host->id);

	dsi_clk_ctrl(msm_host, 0);

	dsi_host_regulator_disable(msm_host);

	DBG("-");

	msm_host->power_on = false;

unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return 0;
}
  1773. int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
  1774. struct drm_display_mode *mode)
  1775. {
  1776. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1777. if (msm_host->mode) {
  1778. drm_mode_destroy(msm_host->dev, msm_host->mode);
  1779. msm_host->mode = NULL;
  1780. }
  1781. msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
  1782. if (IS_ERR(msm_host->mode)) {
  1783. pr_err("%s: cannot duplicate mode\n", __func__);
  1784. return PTR_ERR(msm_host->mode);
  1785. }
  1786. return 0;
  1787. }
  1788. struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
  1789. unsigned long *panel_flags)
  1790. {
  1791. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1792. struct drm_panel *panel;
  1793. panel = of_drm_find_panel(msm_host->device_node);
  1794. if (panel_flags)
  1795. *panel_flags = msm_host->mode_flags;
  1796. return panel;
  1797. }
  1798. struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
  1799. {
  1800. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1801. return of_drm_find_bridge(msm_host->device_node);
  1802. }