dsi_host.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210
  1. /*
  2. * Copyright (c) 2015, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/clk.h>
  14. #include <linux/delay.h>
  15. #include <linux/err.h>
  16. #include <linux/gpio.h>
  17. #include <linux/gpio/consumer.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/of_device.h>
  20. #include <linux/of_gpio.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/pinctrl/consumer.h>
  23. #include <linux/of_graph.h>
  24. #include <linux/regulator/consumer.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/mfd/syscon.h>
  27. #include <linux/regmap.h>
  28. #include <video/mipi_display.h>
  29. #include "dsi.h"
  30. #include "dsi.xml.h"
  31. #include "sfpb.xml.h"
  32. #include "dsi_cfg.h"
/*
 * dsi_get_version() - identify the DSI controller generation from registers.
 * @base:  ioremapped controller register base
 * @major: out: major version number (MSM_DSI_VER_MAJOR_*)
 * @minor: out: minor version (read from 6G_HW_VERSION on 6G hosts, 0 for
 *         DSIv2 and older)
 *
 * Return: 0 on success, -EINVAL on a NULL out-pointer or an unrecognized
 * version value.
 */
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
	u32 ver;

	if (!major || !minor)
		return -EINVAL;

	/*
	 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
	 * makes all other registers 4-byte shifted down.
	 *
	 * In order to identify between DSI6G(v3) and beyond, and DSIv2 and
	 * older, we read the DSI_VERSION register without any shift(offset
	 * 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
	 * the case of DSI6G, this has to be zero (the offset points to a
	 * scratch register which we never touch)
	 */
	ver = msm_readl(base + REG_DSI_VERSION);
	if (ver) {
		/* older dsi host, there is no register shift */
		ver = FIELD(ver, DSI_VERSION_MAJOR);
		if (ver <= MSM_DSI_VER_MAJOR_V2) {
			/* old versions */
			*major = ver;
			*minor = 0;
			return 0;
		} else {
			return -EINVAL;
		}
	} else {
		/*
		 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
		 * registers are shifted down, read DSI_VERSION again with
		 * the shifted offset
		 */
		ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
		ver = FIELD(ver, DSI_VERSION_MAJOR);
		if (ver == MSM_DSI_VER_MAJOR_6G) {
			/* 6G version */
			*major = ver;
			*minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
			return 0;
		} else {
			return -EINVAL;
		}
	}
}
/*
 * Bits accumulated in msm_host->err_work_state to tell the error worker
 * which error interrupts fired.
 * NOTE(review): DSI_ERR_STATE_ACK is 0x0000 and therefore cannot be
 * tested as a bit flag — looks intentional upstream, but worth confirming.
 */
#define DSI_ERR_STATE_ACK			0x0000
#define DSI_ERR_STATE_TIMEOUT			0x0001
#define DSI_ERR_STATE_DLN0_PHY			0x0002
#define DSI_ERR_STATE_FIFO			0x0004
#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW	0x0008
#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION	0x0010
#define DSI_ERR_STATE_PLL_UNLOCKED		0x0020

/* All controller clock-enable bits written to REG_DSI_CLK_CTRL at once */
#define DSI_CLK_CTRL_ENABLE_CLKS	\
		(DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
		DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
		DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
		DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
/*
 * struct msm_dsi_host - state for one MSM DSI host controller instance.
 *
 * Embeds the generic mipi_dsi_host (use to_msm_dsi_host() to recover it)
 * and carries the clocks, regulators, buffers and locks the driver needs.
 */
struct msm_dsi_host {
	struct mipi_dsi_host base;

	struct platform_device *pdev;
	struct drm_device *dev;

	int id;					/* controller index */

	void __iomem *ctrl_base;		/* register base, see dsi_read/dsi_write */
	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
	struct clk *bus_clks[DSI_BUS_CLK_MAX];	/* AHB/bus clocks from cfg */

	/* link clocks common to all host generations */
	struct clk *byte_clk;
	struct clk *esc_clk;
	struct clk *pixel_clk;
	struct clk *byte_clk_src;
	struct clk *pixel_clk_src;

	u32 byte_clk_rate;			/* computed in dsi_calc_clk_rate() */
	u32 esc_clk_rate;

	/* DSI v2 specific clocks */
	struct clk *src_clk;
	struct clk *esc_clk_src;
	struct clk *dsi_clk_src;

	u32 src_clk_rate;

	struct gpio_desc *disp_en_gpio;
	struct gpio_desc *te_gpio;

	const struct msm_dsi_cfg_handler *cfg_hnd;	/* version-specific cfg/ops */

	struct completion dma_comp;
	struct completion video_comp;
	struct mutex dev_mutex;
	struct mutex cmd_mutex;
	struct mutex clk_mutex;			/* serializes dsi_clk_ctrl() */
	spinlock_t intr_lock; /* Protect interrupt ctrl register */

	u32 err_work_state;			/* DSI_ERR_STATE_* bits for err_work */
	struct work_struct err_work;
	struct workqueue_struct *workqueue;

	/* DSI 6G TX buffer*/
	struct drm_gem_object *tx_gem_obj;

	/* DSI v2 TX buffer */
	void *tx_buf;
	dma_addr_t tx_buf_paddr;

	int tx_size;

	u8 *rx_buf;

	struct regmap *sfpb;			/* syscon for sfpb control */

	struct drm_display_mode *mode;

	/* connected device info */
	struct device_node *device_node;
	unsigned int channel;			/* virtual channel id */
	unsigned int lanes;
	enum mipi_dsi_pixel_format format;
	unsigned long mode_flags;		/* MIPI_DSI_MODE_* flags */

	u32 dma_cmd_ctrl_restore;		/* saved REG_DSI_CMD_DMA_CTRL value */

	bool registered;
	bool power_on;
	int irq;
};
  142. static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
  143. {
  144. switch (fmt) {
  145. case MIPI_DSI_FMT_RGB565: return 16;
  146. case MIPI_DSI_FMT_RGB666_PACKED: return 18;
  147. case MIPI_DSI_FMT_RGB666:
  148. case MIPI_DSI_FMT_RGB888:
  149. default: return 24;
  150. }
  151. }
/* Read a 32-bit controller register at offset @reg from ctrl_base. */
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
	return msm_readl(msm_host->ctrl_base + reg);
}
/* Write 32-bit @data to the controller register at offset @reg. */
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
	msm_writel(data, msm_host->ctrl_base + reg);
}
  160. static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
  161. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
/*
 * dsi_get_config() - detect the controller version and look up its
 * configuration/ops table.
 *
 * Register access requires the GDSC power domain and the AHB interface
 * clock, so both are brought up temporarily just to read the version
 * registers, then released again in strict reverse order via the goto
 * chain below.
 *
 * Return: the matching cfg handler, or NULL on any failure.
 */
static const struct msm_dsi_cfg_handler *dsi_get_config(
						struct msm_dsi_host *msm_host)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
	struct device *dev = &msm_host->pdev->dev;
	struct regulator *gdsc_reg;
	struct clk *ahb_clk;
	int ret;
	u32 major = 0, minor = 0;

	gdsc_reg = regulator_get(dev, "gdsc");
	if (IS_ERR(gdsc_reg)) {
		pr_err("%s: cannot get gdsc\n", __func__);
		goto exit;
	}

	ahb_clk = clk_get(dev, "iface_clk");
	if (IS_ERR(ahb_clk)) {
		pr_err("%s: cannot get interface clock\n", __func__);
		goto put_gdsc;
	}

	ret = regulator_enable(gdsc_reg);
	if (ret) {
		pr_err("%s: unable to enable gdsc\n", __func__);
		goto put_clk;
	}

	ret = clk_prepare_enable(ahb_clk);
	if (ret) {
		pr_err("%s: unable to enable ahb_clk\n", __func__);
		goto disable_gdsc;
	}

	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
	if (ret) {
		pr_err("%s: Invalid version\n", __func__);
		goto disable_clks;
	}

	cfg_hnd = msm_dsi_cfg_get(major, minor);

	DBG("%s: Version %x:%x\n", __func__, major, minor);

	/* Tear down in reverse acquisition order; cfg_hnd stays valid. */
disable_clks:
	clk_disable_unprepare(ahb_clk);
disable_gdsc:
	regulator_disable(gdsc_reg);
put_clk:
	clk_put(ahb_clk);
put_gdsc:
	regulator_put(gdsc_reg);
exit:
	return cfg_hnd;
}
/* Recover the driver-private msm_dsi_host from the embedded mipi_dsi_host. */
static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
{
	return container_of(host, struct msm_dsi_host, base);
}
  213. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
  214. {
  215. struct regulator_bulk_data *s = msm_host->supplies;
  216. const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
  217. int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
  218. int i;
  219. DBG("");
  220. for (i = num - 1; i >= 0; i--)
  221. if (regs[i].disable_load >= 0)
  222. regulator_set_load(s[i].consumer,
  223. regs[i].disable_load);
  224. regulator_bulk_disable(num, s);
  225. }
/*
 * dsi_host_regulator_enable() - program per-supply load currents and enable
 * all host regulators.
 *
 * A negative enable_load in the config means "do not set a load" for that
 * supply.  On failure, loads already programmed are restored to their
 * disable_load values before returning.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
{
	struct regulator_bulk_data *s = msm_host->supplies;
	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
	int ret, i;

	DBG("");

	for (i = 0; i < num; i++) {
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
							regs[i].enable_load);
			if (ret < 0) {
				pr_err("regulator %d set op mode failed, %d\n",
					i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		pr_err("regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	/* unwind only the loads set so far (i is the failing index) */
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);

	return ret;
}
/*
 * dsi_regulator_init() - acquire all supplies named in the version config
 * and program their voltage ranges where the regulator supports it.
 *
 * Uses devm_* so the supplies are released automatically on driver detach.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int dsi_regulator_init(struct msm_dsi_host *msm_host)
{
	struct regulator_bulk_data *s = msm_host->supplies;
	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
	int i, ret;

	for (i = 0; i < num; i++)
		s[i].supply = regs[i].name;

	ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
	if (ret < 0) {
		pr_err("%s: failed to init regulator, ret=%d\n",
			__func__, ret);
		return ret;
	}

	for (i = 0; i < num; i++) {
		/* fixed regulators reject set_voltage; skip them */
		if (regulator_can_change_voltage(s[i].consumer)) {
			ret = regulator_set_voltage(s[i].consumer,
				regs[i].min_voltage, regs[i].max_voltage);
			if (ret < 0) {
				pr_err("regulator %d set voltage failed, %d\n",
					i, ret);
				return ret;
			}
		}
	}

	return 0;
}
/*
 * dsi_clk_init() - look up every clock this host generation needs.
 *
 * Fetches the config-driven bus clocks, the common link clocks
 * (byte/pixel/esc) and their parents, and — on DSIv2 only — the extra
 * src_clk and its parent chain.  All lookups use devm_clk_get(), so
 * nothing needs explicit release on failure; clk_get_parent() does not
 * take a reference either.
 *
 * Return: 0 on success, negative errno on the first failed lookup.
 */
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
	struct device *dev = &msm_host->pdev->dev;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	const struct msm_dsi_config *cfg = cfg_hnd->cfg;
	int i, ret = 0;

	/* get bus clocks */
	for (i = 0; i < cfg->num_bus_clks; i++) {
		msm_host->bus_clks[i] = devm_clk_get(dev,
						cfg->bus_clk_names[i]);
		if (IS_ERR(msm_host->bus_clks[i])) {
			ret = PTR_ERR(msm_host->bus_clks[i]);
			pr_err("%s: Unable to get %s, ret = %d\n",
				__func__, cfg->bus_clk_names[i], ret);
			goto exit;
		}
	}

	/* get link and source clocks */
	msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
	if (IS_ERR(msm_host->byte_clk)) {
		ret = PTR_ERR(msm_host->byte_clk);
		pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
			__func__, ret);
		msm_host->byte_clk = NULL;
		goto exit;
	}

	msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
	if (IS_ERR(msm_host->pixel_clk)) {
		ret = PTR_ERR(msm_host->pixel_clk);
		pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
			__func__, ret);
		msm_host->pixel_clk = NULL;
		goto exit;
	}

	msm_host->esc_clk = devm_clk_get(dev, "core_clk");
	if (IS_ERR(msm_host->esc_clk)) {
		ret = PTR_ERR(msm_host->esc_clk);
		pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
			__func__, ret);
		msm_host->esc_clk = NULL;
		goto exit;
	}

	/* parents are needed so the PLL outputs can be re-parented later */
	msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
	if (!msm_host->byte_clk_src) {
		ret = -ENODEV;
		pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
		goto exit;
	}

	msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
	if (!msm_host->pixel_clk_src) {
		ret = -ENODEV;
		pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
		goto exit;
	}

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
		/* DSIv2 additionally routes pixel data through src_clk */
		msm_host->src_clk = devm_clk_get(dev, "src_clk");
		if (IS_ERR(msm_host->src_clk)) {
			ret = PTR_ERR(msm_host->src_clk);
			pr_err("%s: can't find dsi_src_clk. ret=%d\n",
				__func__, ret);
			msm_host->src_clk = NULL;
			goto exit;
		}

		msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
		if (!msm_host->esc_clk_src) {
			ret = -ENODEV;
			pr_err("%s: can't get esc_clk_src. ret=%d\n",
				__func__, ret);
			goto exit;
		}

		msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
		if (!msm_host->dsi_clk_src) {
			ret = -ENODEV;
			pr_err("%s: can't get dsi_clk_src. ret=%d\n",
				__func__, ret);
		}
	}

exit:
	return ret;
}
  362. static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
  363. {
  364. const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
  365. int i, ret;
  366. DBG("id=%d", msm_host->id);
  367. for (i = 0; i < cfg->num_bus_clks; i++) {
  368. ret = clk_prepare_enable(msm_host->bus_clks[i]);
  369. if (ret) {
  370. pr_err("%s: failed to enable bus clock %d ret %d\n",
  371. __func__, i, ret);
  372. goto err;
  373. }
  374. }
  375. return 0;
  376. err:
  377. for (; i > 0; i--)
  378. clk_disable_unprepare(msm_host->bus_clks[i]);
  379. return ret;
  380. }
  381. static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
  382. {
  383. const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
  384. int i;
  385. DBG("");
  386. for (i = cfg->num_bus_clks - 1; i >= 0; i--)
  387. clk_disable_unprepare(msm_host->bus_clks[i]);
  388. }
/*
 * dsi_link_clk_enable_6g() - set rates and enable link clocks on DSI 6G.
 *
 * Rates must be set before enabling; byte_clk_rate comes from
 * dsi_calc_clk_rate() and the pixel clock from the current mode.  The goto
 * chain unwinds in reverse enable order (esc -> byte -> pixel).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	/* mode->clock is in kHz, clk API wants Hz */
	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

pixel_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
error:
	return ret;
}
/*
 * dsi_link_clk_enable_v2() - set rates and enable link clocks on DSIv2.
 *
 * DSIv2 additionally drives esc_clk at a computed rate and routes pixel
 * data through src_clk, so four rates are programmed before any clock is
 * enabled.  The goto chain unwinds in reverse enable order
 * (byte -> esc -> src -> pixel).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate,
		msm_host->esc_clk_rate, msm_host->src_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
		goto error;
	}

	/* mode->clock is in kHz, clk API wants Hz */
	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto esc_clk_err;
	}

	ret = clk_prepare_enable(msm_host->src_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi src clk\n", __func__);
		goto src_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

pixel_clk_err:
	clk_disable_unprepare(msm_host->src_clk);
src_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
esc_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
error:
	return ret;
}
  483. static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
  484. {
  485. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  486. if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
  487. return dsi_link_clk_enable_6g(msm_host);
  488. else
  489. return dsi_link_clk_enable_v2(msm_host);
  490. }
/*
 * dsi_link_clk_disable() - disable the link clocks enabled by
 * dsi_link_clk_enable_6g()/_v2().
 *
 * The per-generation disable order below is kept as-is; DSIv2 has the
 * extra src_clk in its chain.
 */
static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		clk_disable_unprepare(msm_host->esc_clk);
		clk_disable_unprepare(msm_host->pixel_clk);
		clk_disable_unprepare(msm_host->byte_clk);
	} else {
		clk_disable_unprepare(msm_host->pixel_clk);
		clk_disable_unprepare(msm_host->src_clk);
		clk_disable_unprepare(msm_host->esc_clk);
		clk_disable_unprepare(msm_host->byte_clk);
	}
}
/*
 * dsi_clk_ctrl() - enable or disable the full clock stack (bus + link)
 * under clk_mutex.
 * @enable: true to bring clocks up (bus first, then link), false to take
 *          them down (link first, then bus).
 *
 * If the link clocks fail to enable, the already-enabled bus clocks are
 * rolled back.
 *
 * Return: 0 on success (always 0 on the disable path), negative errno
 * otherwise.
 */
static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
{
	int ret = 0;

	mutex_lock(&msm_host->clk_mutex);

	if (enable) {
		ret = dsi_bus_clk_enable(msm_host);
		if (ret) {
			pr_err("%s: Can not enable bus clk, %d\n",
				__func__, ret);
			goto unlock_ret;
		}

		ret = dsi_link_clk_enable(msm_host);
		if (ret) {
			pr_err("%s: Can not enable link clk, %d\n",
				__func__, ret);
			dsi_bus_clk_disable(msm_host);
			goto unlock_ret;
		}
	} else {
		dsi_link_clk_disable(msm_host);
		dsi_bus_clk_disable(msm_host);
	}

unlock_ret:
	mutex_unlock(&msm_host->clk_mutex);
	return ret;
}
/*
 * dsi_calc_clk_rate() - derive byte/esc/src clock rates from the current
 * display mode, pixel format and lane count.
 *
 * byte_clk_rate = pclk * bpp / (8 * lanes).  On DSIv2 the escape clock is
 * additionally derived from the byte clock through a divider constrained
 * to 1..16, searching for an escape frequency between 20 MHz and 5 MHz.
 *
 * NOTE(review): pclk_rate * bpp is computed in 32 bits; for very high
 * pixel clocks this could overflow before the division — confirm against
 * the supported mode range.
 *
 * Return: 0 on success, -EINVAL if no mode is set or no valid esc divider
 * exists.
 */
static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	u8 lanes = msm_host->lanes;
	u32 bpp = dsi_get_bpp(msm_host->format);
	u32 pclk_rate;

	if (!mode) {
		pr_err("%s: mode not set\n", __func__);
		return -EINVAL;
	}

	/* mode->clock is in kHz */
	pclk_rate = mode->clock * 1000;
	if (lanes > 0) {
		msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
	} else {
		/* lane count unset: fall back to a single lane */
		pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
		msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
	}

	DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);

	msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
		unsigned int esc_mhz, esc_div;
		unsigned long byte_mhz;

		msm_host->src_clk_rate = (pclk_rate * bpp) / 8;

		/*
		 * esc clock is byte clock followed by a 4 bit divider,
		 * we need to find an escape clock frequency within the
		 * mipi DSI spec range within the maximum divider limit
		 * We iterate here between an escape clock frequencey
		 * between 20 Mhz to 5 Mhz and pick up the first one
		 * that can be supported by our divider
		 */
		byte_mhz = msm_host->byte_clk_rate / 1000000;

		for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
			esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);

			/*
			 * TODO: Ideally, we shouldn't know what sort of divider
			 * is available in mmss_cc, we're just assuming that
			 * it'll always be a 4 bit divider. Need to come up with
			 * a better way here.
			 */
			if (esc_div >= 1 && esc_div <= 16)
				break;
		}

		if (esc_mhz < 5)
			return -EINVAL;

		msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;

		DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
			msm_host->src_clk_rate);
	}

	return 0;
}
/*
 * dsi_phy_sw_reset() - pulse the PHY software reset bit.
 *
 * Asserts DSI_PHY_RESET_RESET, waits 1 ms for the reset to take effect,
 * then de-asserts it and waits a further 100 us.
 */
static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
{
	DBG("");
	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
	/* Make sure fully reset */
	wmb();
	udelay(1000);
	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
	udelay(100);
}
/*
 * dsi_intr_ctrl() - set or clear bits in the interrupt control register.
 * @mask:   interrupt bits to change
 * @enable: non-zero to set the bits, zero to clear them
 *
 * Performs a read-modify-write of REG_DSI_INTR_CTRL under intr_lock so
 * concurrent callers (including the IRQ path) do not clobber each other.
 */
static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
{
	u32 intr;
	unsigned long flags;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);

	if (enable)
		intr |= mask;
	else
		intr &= ~mask;

	DBG("intr=%x enable=%d", intr, enable);

	dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
}
  607. static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
  608. {
  609. if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
  610. return BURST_MODE;
  611. else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
  612. return NON_BURST_SYNCH_PULSE;
  613. return NON_BURST_SYNCH_EVENT;
  614. }
  615. static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
  616. const enum mipi_dsi_pixel_format mipi_fmt)
  617. {
  618. switch (mipi_fmt) {
  619. case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
  620. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
  621. case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
  622. case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
  623. default: return VID_DST_FORMAT_RGB888;
  624. }
  625. }
  626. static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
  627. const enum mipi_dsi_pixel_format mipi_fmt)
  628. {
  629. switch (mipi_fmt) {
  630. case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
  631. case MIPI_DSI_FMT_RGB666_PACKED:
  632. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
  633. case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
  634. default: return CMD_DST_FORMAT_RGB888;
  635. }
  636. }
  637. static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
  638. u32 clk_pre, u32 clk_post)
  639. {
  640. u32 flags = msm_host->mode_flags;
  641. enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
  642. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  643. u32 data = 0;
  644. if (!enable) {
  645. dsi_write(msm_host, REG_DSI_CTRL, 0);
  646. return;
  647. }
  648. if (flags & MIPI_DSI_MODE_VIDEO) {
  649. if (flags & MIPI_DSI_MODE_VIDEO_HSE)
  650. data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
  651. if (flags & MIPI_DSI_MODE_VIDEO_HFP)
  652. data |= DSI_VID_CFG0_HFP_POWER_STOP;
  653. if (flags & MIPI_DSI_MODE_VIDEO_HBP)
  654. data |= DSI_VID_CFG0_HBP_POWER_STOP;
  655. if (flags & MIPI_DSI_MODE_VIDEO_HSA)
  656. data |= DSI_VID_CFG0_HSA_POWER_STOP;
  657. /* Always set low power stop mode for BLLP
  658. * to let command engine send packets
  659. */
  660. data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
  661. DSI_VID_CFG0_BLLP_POWER_STOP;
  662. data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
  663. data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
  664. data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
  665. dsi_write(msm_host, REG_DSI_VID_CFG0, data);
  666. /* Do not swap RGB colors */
  667. data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
  668. dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
  669. } else {
  670. /* Do not swap RGB colors */
  671. data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
  672. data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
  673. dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
  674. data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
  675. DSI_CMD_CFG1_WR_MEM_CONTINUE(
  676. MIPI_DCS_WRITE_MEMORY_CONTINUE);
  677. /* Always insert DCS command */
  678. data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
  679. dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
  680. }
  681. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
  682. DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
  683. DSI_CMD_DMA_CTRL_LOW_POWER);
  684. data = 0;
  685. /* Always assume dedicated TE pin */
  686. data |= DSI_TRIG_CTRL_TE;
  687. data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
  688. data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
  689. data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
  690. if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
  691. (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
  692. data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
  693. dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
  694. data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
  695. DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
  696. dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
  697. data = 0;
  698. if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
  699. data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
  700. dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
  701. /* allow only ack-err-status to generate interrupt */
  702. dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
  703. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
  704. dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
  705. data = DSI_CTRL_CLK_EN;
  706. DBG("lane number=%d", msm_host->lanes);
  707. if (msm_host->lanes == 2) {
  708. data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
  709. /* swap lanes for 2-lane panel for better performance */
  710. dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
  711. DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
  712. } else {
  713. /* Take 4 lanes as default */
  714. data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
  715. DSI_CTRL_LANE3;
  716. /* Do not swap lanes for 4-lane panel */
  717. dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
  718. DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
  719. }
  720. if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
  721. dsi_write(msm_host, REG_DSI_LANE_CTRL,
  722. DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
  723. data |= DSI_CTRL_ENABLE;
  724. dsi_write(msm_host, REG_DSI_CTRL, data);
  725. }
/*
 * Program the panel timing from msm_host->mode.
 *
 * Video mode: the active/total/sync window registers are derived with the
 * sync pulse start taken as position 0, so active start = total - sync
 * start.  Command mode: only the MDP stream word count and frame
 * dimensions are needed.
 */
static void dsi_timing_setup(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
	u32 h_total = mode->htotal;
	u32 v_total = mode->vtotal;
	u32 hs_end = mode->hsync_end - mode->hsync_start;	/* sync width */
	u32 vs_end = mode->vsync_end - mode->vsync_start;
	u32 ha_start = h_total - mode->hsync_start;
	u32 ha_end = ha_start + mode->hdisplay;
	u32 va_start = v_total - mode->vsync_start;
	u32 va_end = va_start + mode->vdisplay;
	u32 wc;

	DBG("");

	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
		dsi_write(msm_host, REG_DSI_ACTIVE_H,
			DSI_ACTIVE_H_START(ha_start) |
			DSI_ACTIVE_H_END(ha_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_V,
			DSI_ACTIVE_V_START(va_start) |
			DSI_ACTIVE_V_END(va_end));
		/* totals are programmed as N - 1 */
		dsi_write(msm_host, REG_DSI_TOTAL,
			DSI_TOTAL_H_TOTAL(h_total - 1) |
			DSI_TOTAL_V_TOTAL(v_total - 1));
		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
			DSI_ACTIVE_HSYNC_START(hs_start) |
			DSI_ACTIVE_HSYNC_END(hs_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
	} else {		/* command mode */
		/* image data and 1 byte write_memory_start cmd */
		wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
					msm_host->channel) |
			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
					MIPI_DSI_DCS_LONG_WRITE));

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
	}
}
/* Soft-reset the controller; its clocks must be running while RESET toggles. */
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* clocks need to be enabled before reset */

	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb(); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
}
  779. static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
  780. bool video_mode, bool enable)
  781. {
  782. u32 dsi_ctrl;
  783. dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
  784. if (!enable) {
  785. dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
  786. DSI_CTRL_CMD_MODE_EN);
  787. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
  788. DSI_IRQ_MASK_VIDEO_DONE, 0);
  789. } else {
  790. if (video_mode) {
  791. dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
  792. } else { /* command mode */
  793. dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
  794. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
  795. }
  796. dsi_ctrl |= DSI_CTRL_ENABLE;
  797. }
  798. dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
  799. }
  800. static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
  801. {
  802. u32 data;
  803. data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
  804. if (mode == 0)
  805. data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
  806. else
  807. data |= DSI_CMD_DMA_CTRL_LOW_POWER;
  808. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
  809. }
/*
 * Arm the VIDEO_DONE interrupt and wait (up to 70 ms) for the isr to
 * signal video_comp; a timeout is tolerated, not treated as an error.
 * The interrupt is unmasked before reinit so a completion already in
 * flight is not lost, and masked again afterwards.
 */
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);

	reinit_completion(&msm_host->video_comp);

	wait_for_completion_timeout(&msm_host->video_comp,
			msecs_to_jiffies(70));

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
  818. static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
  819. {
  820. if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
  821. return;
  822. if (msm_host->power_on) {
  823. dsi_wait4video_done(msm_host);
  824. /* delay 4 ms to skip BLLP */
  825. usleep_range(2000, 4000);
  826. }
  827. }
  828. /* dsi_cmd */
/*
 * Allocate the TX buffer used for command DMA.
 *
 * DSI6G: backed by a GEM object whose iova is handed to the DMA engine
 * and must be 8-byte aligned.  Older controllers: a plain coherent DMA
 * buffer.  msm_host->tx_size records the usable size for later bounds
 * checks in dsi_cmd_dma_add().  Returns 0 or a negative errno.
 */
static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
	struct drm_device *dev = msm_host->dev;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int ret;
	u32 iova;

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		/* the _locked GEM helpers require struct_mutex to be held */
		mutex_lock(&dev->struct_mutex);
		msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
		if (IS_ERR(msm_host->tx_gem_obj)) {
			ret = PTR_ERR(msm_host->tx_gem_obj);
			pr_err("%s: failed to allocate gem, %d\n",
				__func__, ret);
			msm_host->tx_gem_obj = NULL;
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}

		ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			pr_err("%s: failed to get iova, %d\n", __func__, ret);
			return ret;
		}

		/* the command DMA engine needs an 8-byte aligned buffer */
		if (iova & 0x07) {
			pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
			return -EINVAL;
		}

		msm_host->tx_size = msm_host->tx_gem_obj->size;
	} else {
		msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
					&msm_host->tx_buf_paddr, GFP_KERNEL);
		if (!msm_host->tx_buf) {
			ret = -ENOMEM;
			pr_err("%s: failed to allocate tx buf, %d\n",
				__func__, ret);
			return ret;
		}

		msm_host->tx_size = size;
	}

	return 0;
}
/* Release the command-DMA TX buffer allocated by dsi_tx_buf_alloc(). */
static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
{
	struct drm_device *dev = msm_host->dev;

	/* DSI6G path: GEM-backed buffer */
	if (msm_host->tx_gem_obj) {
		msm_gem_put_iova(msm_host->tx_gem_obj, 0);
		mutex_lock(&dev->struct_mutex);
		msm_gem_free_object(msm_host->tx_gem_obj);
		msm_host->tx_gem_obj = NULL;
		mutex_unlock(&dev->struct_mutex);
	}

	/* legacy path: coherent DMA buffer */
	if (msm_host->tx_buf)
		dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
			msm_host->tx_buf_paddr);
}
/*
 * Prepare one command packet in the TX buffer for DMA transmission.
 *
 * The MSM controller expects its own in-memory layout: the standard DSI
 * header bytes are stored in the order header[1], header[2], header[0],
 * byte 3 carries flag bits (7: last packet, 6: long packet, 5: response
 * expected), the payload follows at offset 4, and the buffer is padded
 * with 0xff up to a 4-byte boundary.
 *
 * Returns the 4-byte-aligned length staged, or a negative errno.
 */
static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
			const struct mipi_dsi_msg *msg)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	struct mipi_dsi_packet packet;
	int len;
	int ret;
	u8 *data;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret) {
		pr_err("%s: create packet failed, %d\n", __func__, ret);
		return ret;
	}

	/* round up to a multiple of 4 bytes */
	len = (packet.size + 3) & (~0x3);

	if (len > msm_host->tx_size) {
		pr_err("%s: packet size is too big\n", __func__);
		return -EINVAL;
	}

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		data = msm_gem_vaddr(msm_host->tx_gem_obj);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			pr_err("%s: get vaddr failed, %d\n", __func__, ret);
			return ret;
		}
	} else {
		data = msm_host->tx_buf;
	}

	/* MSM specific command format in memory */
	data[0] = packet.header[1];
	data[1] = packet.header[2];
	data[2] = packet.header[0];
	data[3] = BIT(7); /* Last packet */
	if (mipi_dsi_packet_format_is_long(msg->type))
		data[3] |= BIT(6);
	if (msg->rx_buf && msg->rx_len)
		data[3] |= BIT(5);

	/* Long packet */
	if (packet.payload && packet.payload_length)
		memcpy(data + 4, packet.payload, packet.payload_length);

	/* Append 0xff to the end */
	if (packet.size < len)
		memset(data + packet.size, 0xff, len - packet.size);

	return len;
}
  932. /*
  933. * dsi_short_read1_resp: 1 parameter
  934. */
  935. static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  936. {
  937. u8 *data = msg->rx_buf;
  938. if (data && (msg->rx_len >= 1)) {
  939. *data = buf[1]; /* strip out dcs type */
  940. return 1;
  941. } else {
  942. pr_err("%s: read data does not match with rx_buf len %zu\n",
  943. __func__, msg->rx_len);
  944. return -EINVAL;
  945. }
  946. }
  947. /*
  948. * dsi_short_read2_resp: 2 parameter
  949. */
  950. static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  951. {
  952. u8 *data = msg->rx_buf;
  953. if (data && (msg->rx_len >= 2)) {
  954. data[0] = buf[1]; /* strip out dcs type */
  955. data[1] = buf[2];
  956. return 2;
  957. } else {
  958. pr_err("%s: read data does not match with rx_buf len %zu\n",
  959. __func__, msg->rx_len);
  960. return -EINVAL;
  961. }
  962. }
  963. static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  964. {
  965. /* strip out 4 byte dcs header */
  966. if (msg->rx_buf && msg->rx_len)
  967. memcpy(msg->rx_buf, buf + 4, msg->rx_len);
  968. return msg->rx_len;
  969. }
/*
 * Transmit @len staged bytes through the command DMA engine.
 *
 * Resolves the DMA address (GEM iova on DSI6G, coherent-buffer physical
 * address otherwise), waits for a video-mode BLLP window, then asks the
 * manager to trigger the transfer.  Returns @len on success, -ETIMEDOUT
 * if the dma-done completion never fired, or a negative errno from the
 * iova lookup.
 */
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int ret;
	u32 dma_base;
	bool triggered;

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &dma_base);
		if (ret) {
			pr_err("%s: failed to get iova: %d\n", __func__, ret);
			return ret;
		}
	} else {
		dma_base = msm_host->tx_buf_paddr;
	}

	/* reinit before the trigger so the irq's complete() is not lost */
	reinit_completion(&msm_host->dma_comp);

	dsi_wait4video_eng_busy(msm_host);

	triggered = msm_dsi_manager_cmd_xfer_trigger(
						msm_host->id, dma_base, len);
	if (triggered) {
		ret = wait_for_completion_timeout(&msm_host->dma_comp,
					msecs_to_jiffies(200));
		DBG("ret=%d", ret);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else
			ret = len;
	} else
		ret = len;	/* manager deferred the trigger; not an error */

	return ret;
}
/*
 * Drain read-back data from the four 32-bit RDBK_DATA registers (a
 * 16-byte RX FIFO) into @buf.  @rx_byte is the byte count expected on
 * the wire for this pass and @pkt_size the max-return-packet-size used.
 * When the full response exceeds the FIFO, the leading bytes were
 * captured by earlier passes, so the repeated prefix is skipped (see the
 * comment below).  Returns the number of bytes stored in @buf.
 */
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
			u8 *buf, int rx_byte, int pkt_size)
{
	/* NOTE(review): lp is assigned but never used — candidate for removal */
	u32 *lp, *temp, data;
	int i, j = 0, cnt;
	u32 read_cnt;
	u8 reg[16];
	int repeated_bytes = 0;
	int buf_offset = buf - msm_host->rx_buf;

	lp = (u32 *)buf;
	temp = (u32 *)reg;
	cnt = (rx_byte + 3) >> 2;
	if (cnt > 4)
		cnt = 4; /* 4 x 32 bits registers only */

	if (rx_byte == 4)
		read_cnt = 4;
	else
		read_cnt = pkt_size + 6;	/* 4 header + 2 crc */

	/*
	 * In case of multiple reads from the panel, after the first read, there
	 * is possibility that there are some bytes in the payload repeating in
	 * the RDBK_DATA registers. Since we read all the parameters from the
	 * panel right from the first byte for every pass. We need to skip the
	 * repeating bytes and then append the new parameters to the rx buffer.
	 */
	if (read_cnt > 16) {
		int bytes_shifted;
		/* Any data more than 16 bytes will be shifted out.
		 * The temp read buffer should already contain these bytes.
		 * The remaining bytes in read buffer are the repeated bytes.
		 */
		bytes_shifted = read_cnt - 16;
		repeated_bytes = buf_offset - bytes_shifted;
	}

	/* read highest-index register first; convert to host byte order */
	for (i = cnt - 1; i >= 0; i--) {
		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
		*temp++ = ntohl(data); /* to host byte order */
		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
	}

	/* copy only the non-repeated tail into the caller's buffer */
	for (i = repeated_bytes; i < 16; i++)
		buf[j++] = reg[i];

	return j;
}
  1044. static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
  1045. const struct mipi_dsi_msg *msg)
  1046. {
  1047. int len, ret;
  1048. int bllp_len = msm_host->mode->hdisplay *
  1049. dsi_get_bpp(msm_host->format) / 8;
  1050. len = dsi_cmd_dma_add(msm_host, msg);
  1051. if (!len) {
  1052. pr_err("%s: failed to add cmd type = 0x%x\n",
  1053. __func__, msg->type);
  1054. return -EINVAL;
  1055. }
  1056. /* for video mode, do not send cmds more than
  1057. * one pixel line, since it only transmit it
  1058. * during BLLP.
  1059. */
  1060. /* TODO: if the command is sent in LP mode, the bit rate is only
  1061. * half of esc clk rate. In this case, if the video is already
  1062. * actively streaming, we need to check more carefully if the
  1063. * command can be fit into one BLLP.
  1064. */
  1065. if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
  1066. pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
  1067. __func__, len);
  1068. return -EINVAL;
  1069. }
  1070. ret = dsi_cmd_dma_tx(msm_host, len);
  1071. if (ret < len) {
  1072. pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
  1073. __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
  1074. return -ECOMM;
  1075. }
  1076. return len;
  1077. }
/*
 * Recover the controller (used after an MDP FIFO underflow): disable it,
 * force the clocks on, pulse RESET, then restore the saved CTRL value.
 * The wmb()s enforce the ordering the hardware requires at each step.
 */
static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
{
	u32 data0, data1;

	data0 = dsi_read(msm_host, REG_DSI_CTRL);
	data1 = data0;
	data1 &= ~DSI_CTRL_ENABLE;
	dsi_write(msm_host, REG_DSI_CTRL, data1);
	/*
	 * dsi controller need to be disabled before
	 * clocks turned on
	 */
	wmb();

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb();	/* make sure clocks enabled */

	/* dsi controller can only be reset while clocks are running */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb();	/* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb();	/* controller out of reset */
	dsi_write(msm_host, REG_DSI_CTRL, data0);
	wmb();	/* make sure dsi controller enabled again */
}
  1100. static void dsi_err_worker(struct work_struct *work)
  1101. {
  1102. struct msm_dsi_host *msm_host =
  1103. container_of(work, struct msm_dsi_host, err_work);
  1104. u32 status = msm_host->err_work_state;
  1105. pr_err_ratelimited("%s: status=%x\n", __func__, status);
  1106. if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
  1107. dsi_sw_reset_restore(msm_host);
  1108. /* It is safe to clear here because error irq is disabled. */
  1109. msm_host->err_work_state = 0;
  1110. /* enable dsi error interrupt */
  1111. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
  1112. }
  1113. static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
  1114. {
  1115. u32 status;
  1116. status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
  1117. if (status) {
  1118. dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
  1119. /* Writing of an extra 0 needed to clear error bits */
  1120. dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
  1121. msm_host->err_work_state |= DSI_ERR_STATE_ACK;
  1122. }
  1123. }
  1124. static void dsi_timeout_status(struct msm_dsi_host *msm_host)
  1125. {
  1126. u32 status;
  1127. status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
  1128. if (status) {
  1129. dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
  1130. msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
  1131. }
  1132. }
  1133. static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
  1134. {
  1135. u32 status;
  1136. status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
  1137. if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
  1138. DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
  1139. DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
  1140. DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
  1141. DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
  1142. dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
  1143. msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
  1144. }
  1145. }
  1146. static void dsi_fifo_status(struct msm_dsi_host *msm_host)
  1147. {
  1148. u32 status;
  1149. status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
  1150. /* fifo underflow, overflow */
  1151. if (status) {
  1152. dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
  1153. msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
  1154. if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
  1155. msm_host->err_work_state |=
  1156. DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
  1157. }
  1158. }
  1159. static void dsi_status(struct msm_dsi_host *msm_host)
  1160. {
  1161. u32 status;
  1162. status = dsi_read(msm_host, REG_DSI_STATUS0);
  1163. if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
  1164. dsi_write(msm_host, REG_DSI_STATUS0, status);
  1165. msm_host->err_work_state |=
  1166. DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
  1167. }
  1168. }
  1169. static void dsi_clk_status(struct msm_dsi_host *msm_host)
  1170. {
  1171. u32 status;
  1172. status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
  1173. if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
  1174. dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
  1175. msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
  1176. }
  1177. }
/*
 * Top half of error handling, called from the isr: mask the error
 * interrupt, latch every error-status register into err_work_state, and
 * defer recovery to dsi_err_worker() (which re-enables the interrupt).
 */
static void dsi_error(struct msm_dsi_host *msm_host)
{
	/* disable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);

	dsi_clk_status(msm_host);
	dsi_fifo_status(msm_host);
	dsi_ack_err_status(msm_host);
	dsi_timeout_status(msm_host);
	dsi_status(msm_host);
	dsi_dln0_phy_err(msm_host);

	queue_work(msm_host->workqueue, &msm_host->err_work);
}
/*
 * DSI interrupt handler.  Reads and immediately acks all pending bits
 * (under intr_lock, so it cannot race dsi_intr_ctrl() updating the
 * enable bits in the same register), then dispatches: errors are latched
 * via dsi_error(); video-done and dma-done wake their completions.
 */
static irqreturn_t dsi_host_irq(int irq, void *ptr)
{
	struct msm_dsi_host *msm_host = ptr;
	u32 isr;
	unsigned long flags;

	if (!msm_host->ctrl_base)
		return IRQ_HANDLED;	/* registers not mapped; nothing to ack */

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);

	DBG("isr=0x%x, id=%d", isr, msm_host->id);

	if (isr & DSI_IRQ_ERROR)
		dsi_error(msm_host);

	if (isr & DSI_IRQ_VIDEO_DONE)
		complete(&msm_host->video_comp);

	if (isr & DSI_IRQ_CMD_DMA_DONE)
		complete(&msm_host->dma_comp);

	return IRQ_HANDLED;
}
/*
 * Look up the optional display-enable and tearing-effect GPIOs described
 * in the panel's DT node but driven by the host.  A missing GPIO is not
 * an error (devm_gpiod_get_optional() returns NULL then); only a real
 * lookup failure is propagated.
 */
static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
	struct device *panel_device)
{
	msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
							 "disp-enable",
							 GPIOD_OUT_LOW);
	if (IS_ERR(msm_host->disp_en_gpio)) {
		DBG("cannot get disp-enable-gpios %ld",
				PTR_ERR(msm_host->disp_en_gpio));
		return PTR_ERR(msm_host->disp_en_gpio);
	}

	msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
								GPIOD_IN);
	if (IS_ERR(msm_host->te_gpio)) {
		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
		return PTR_ERR(msm_host->te_gpio);
	}

	return 0;
}
/*
 * mipi_dsi_host_ops.attach: cache the peripheral's link configuration
 * (virtual channel, lane count, pixel format, mode flags), claim the
 * panel GPIOs, and kick a hotplug event if the DRM device is already up.
 */
static int dsi_host_attach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	msm_host->channel = dsi->channel;
	msm_host->lanes = dsi->lanes;
	msm_host->format = dsi->format;
	msm_host->mode_flags = dsi->mode_flags;

	/* Some gpios defined in panel DT need to be controlled by host */
	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
	if (ret)
		return ret;

	DBG("id=%d", msm_host->id);
	if (msm_host->dev)
		drm_helper_hpd_irq_event(msm_host->dev);

	return 0;
}
/*
 * mipi_dsi_host_ops.detach: forget the attached device node and kick a
 * hotplug event so the connector state is re-evaluated.
 */
static int dsi_host_detach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	msm_host->device_node = NULL;

	DBG("id=%d", msm_host->id);
	if (msm_host->dev)
		drm_helper_hpd_irq_event(msm_host->dev);

	return 0;
}
  1257. static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
  1258. const struct mipi_dsi_msg *msg)
  1259. {
  1260. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1261. int ret;
  1262. if (!msg || !msm_host->power_on)
  1263. return -EINVAL;
  1264. mutex_lock(&msm_host->cmd_mutex);
  1265. ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
  1266. mutex_unlock(&msm_host->cmd_mutex);
  1267. return ret;
  1268. }
  1269. static struct mipi_dsi_host_ops dsi_host_ops = {
  1270. .attach = dsi_host_attach,
  1271. .detach = dsi_host_detach,
  1272. .transfer = dsi_host_transfer,
  1273. };
  1274. static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
  1275. {
  1276. struct device *dev = &msm_host->pdev->dev;
  1277. struct device_node *np = dev->of_node;
  1278. struct device_node *endpoint, *device_node;
  1279. int ret;
  1280. ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
  1281. if (ret) {
  1282. dev_err(dev, "%s: host index not specified, ret=%d\n",
  1283. __func__, ret);
  1284. return ret;
  1285. }
  1286. /*
  1287. * Get the first endpoint node. In our case, dsi has one output port
  1288. * to which the panel is connected. Don't return an error if a port
  1289. * isn't defined. It's possible that there is nothing connected to
  1290. * the dsi output.
  1291. */
  1292. endpoint = of_graph_get_next_endpoint(np, NULL);
  1293. if (!endpoint) {
  1294. dev_dbg(dev, "%s: no endpoint\n", __func__);
  1295. return 0;
  1296. }
  1297. /* Get panel node from the output port's endpoint data */
  1298. device_node = of_graph_get_remote_port_parent(endpoint);
  1299. if (!device_node) {
  1300. dev_err(dev, "%s: no valid device\n", __func__);
  1301. of_node_put(endpoint);
  1302. return -ENODEV;
  1303. }
  1304. of_node_put(endpoint);
  1305. of_node_put(device_node);
  1306. msm_host->device_node = device_node;
  1307. if (of_property_read_bool(np, "syscon-sfpb")) {
  1308. msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
  1309. "syscon-sfpb");
  1310. if (IS_ERR(msm_host->sfpb)) {
  1311. dev_err(dev, "%s: failed to get sfpb regmap\n",
  1312. __func__);
  1313. return PTR_ERR(msm_host->sfpb);
  1314. }
  1315. }
  1316. return 0;
  1317. }
  1318. int msm_dsi_host_init(struct msm_dsi *msm_dsi)
  1319. {
  1320. struct msm_dsi_host *msm_host = NULL;
  1321. struct platform_device *pdev = msm_dsi->pdev;
  1322. int ret;
  1323. msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
  1324. if (!msm_host) {
  1325. pr_err("%s: FAILED: cannot alloc dsi host\n",
  1326. __func__);
  1327. ret = -ENOMEM;
  1328. goto fail;
  1329. }
  1330. msm_host->pdev = pdev;
  1331. ret = dsi_host_parse_dt(msm_host);
  1332. if (ret) {
  1333. pr_err("%s: failed to parse dt\n", __func__);
  1334. goto fail;
  1335. }
  1336. msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
  1337. if (IS_ERR(msm_host->ctrl_base)) {
  1338. pr_err("%s: unable to map Dsi ctrl base\n", __func__);
  1339. ret = PTR_ERR(msm_host->ctrl_base);
  1340. goto fail;
  1341. }
  1342. msm_host->cfg_hnd = dsi_get_config(msm_host);
  1343. if (!msm_host->cfg_hnd) {
  1344. ret = -EINVAL;
  1345. pr_err("%s: get config failed\n", __func__);
  1346. goto fail;
  1347. }
  1348. /* fixup base address by io offset */
  1349. msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
  1350. ret = dsi_regulator_init(msm_host);
  1351. if (ret) {
  1352. pr_err("%s: regulator init failed\n", __func__);
  1353. goto fail;
  1354. }
  1355. ret = dsi_clk_init(msm_host);
  1356. if (ret) {
  1357. pr_err("%s: unable to initialize dsi clks\n", __func__);
  1358. goto fail;
  1359. }
  1360. msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
  1361. if (!msm_host->rx_buf) {
  1362. pr_err("%s: alloc rx temp buf failed\n", __func__);
  1363. goto fail;
  1364. }
  1365. init_completion(&msm_host->dma_comp);
  1366. init_completion(&msm_host->video_comp);
  1367. mutex_init(&msm_host->dev_mutex);
  1368. mutex_init(&msm_host->cmd_mutex);
  1369. mutex_init(&msm_host->clk_mutex);
  1370. spin_lock_init(&msm_host->intr_lock);
  1371. /* setup workqueue */
  1372. msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
  1373. INIT_WORK(&msm_host->err_work, dsi_err_worker);
  1374. msm_dsi->host = &msm_host->base;
  1375. msm_dsi->id = msm_host->id;
  1376. DBG("Dsi Host %d initialized", msm_host->id);
  1377. return 0;
  1378. fail:
  1379. return ret;
  1380. }
/*
 * Tear down what msm_dsi_host_init()/msm_dsi_host_modeset_init() built:
 * the TX buffer, the error workqueue (flushed first so no worker runs
 * afterwards), and the mutexes.  devm-managed resources are released by
 * the driver core.
 */
void msm_dsi_host_destroy(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	DBG("");
	dsi_tx_buf_free(msm_host);
	if (msm_host->workqueue) {
		flush_workqueue(msm_host->workqueue);
		destroy_workqueue(msm_host->workqueue);
		msm_host->workqueue = NULL;
	}

	mutex_destroy(&msm_host->clk_mutex);
	mutex_destroy(&msm_host->cmd_mutex);
	mutex_destroy(&msm_host->dev_mutex);
}
  1395. int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
  1396. struct drm_device *dev)
  1397. {
  1398. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1399. struct platform_device *pdev = msm_host->pdev;
  1400. int ret;
  1401. msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
  1402. if (msm_host->irq < 0) {
  1403. ret = msm_host->irq;
  1404. dev_err(dev->dev, "failed to get irq: %d\n", ret);
  1405. return ret;
  1406. }
  1407. ret = devm_request_irq(&pdev->dev, msm_host->irq,
  1408. dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
  1409. "dsi_isr", msm_host);
  1410. if (ret < 0) {
  1411. dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
  1412. msm_host->irq, ret);
  1413. return ret;
  1414. }
  1415. msm_host->dev = dev;
  1416. ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
  1417. if (ret) {
  1418. pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
  1419. return ret;
  1420. }
  1421. return 0;
  1422. }
  1423. int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
  1424. {
  1425. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1426. int ret;
  1427. /* Register mipi dsi host */
  1428. if (!msm_host->registered) {
  1429. host->dev = &msm_host->pdev->dev;
  1430. host->ops = &dsi_host_ops;
  1431. ret = mipi_dsi_host_register(host);
  1432. if (ret)
  1433. return ret;
  1434. msm_host->registered = true;
  1435. /* If the panel driver has not been probed after host register,
  1436. * we should defer the host's probe.
  1437. * It makes sure panel is connected when fbcon detects
  1438. * connector status and gets the proper display mode to
  1439. * create framebuffer.
  1440. * Don't try to defer if there is nothing connected to the dsi
  1441. * output
  1442. */
  1443. if (check_defer && msm_host->device_node) {
  1444. if (!of_drm_find_panel(msm_host->device_node))
  1445. if (!of_drm_find_bridge(msm_host->device_node))
  1446. return -EPROBE_DEFER;
  1447. }
  1448. }
  1449. return 0;
  1450. }
  1451. void msm_dsi_host_unregister(struct mipi_dsi_host *host)
  1452. {
  1453. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1454. if (msm_host->registered) {
  1455. mipi_dsi_host_unregister(host);
  1456. host->dev = NULL;
  1457. host->ops = NULL;
  1458. msm_host->registered = false;
  1459. }
  1460. }
/*
 * Prepare the controller for one command transfer: enable clocks (the
 * mdp clock must run for the dsi interrupt to be delivered), pick HS/LP
 * for the DMA, save REG_DSI_CTRL for later restore, force the command
 * engine on, and unmask the DMA-done interrupt.
 * Paired with msm_dsi_host_xfer_restore().
 */
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	/* TODO: make sure dsi_cmd_mdp is idle.
	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
	 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
	 * How to handle the old versions? Wait for mdp cmd done?
	 */

	/*
	 * mdss interrupt is generated in mdp core clock domain
	 * mdp clock need to be enabled to receive dsi interrupt
	 */
	dsi_clk_ctrl(msm_host, 1);

	/* TODO: vote for bus bandwidth */

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(0, msm_host);

	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
	dsi_write(msm_host, REG_DSI_CTRL,
		msm_host->dma_cmd_ctrl_restore |
		DSI_CTRL_CMD_MODE_EN |
		DSI_CTRL_ENABLE);
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);

	return 0;
}
/*
 * Undo msm_dsi_host_xfer_prepare(): mask DMA-done, restore the saved
 * CTRL value, return the DMA path to LP if it was switched to HS, and
 * drop the clock vote.
 */
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(1, msm_host);

	/* TODO: unvote for bus bandwidth */

	dsi_clk_ctrl(msm_host, 0);
}
  1497. int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
  1498. const struct mipi_dsi_msg *msg)
  1499. {
  1500. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1501. return dsi_cmds2buf_tx(msm_host, msg);
  1502. }
  1503. int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
  1504. const struct mipi_dsi_msg *msg)
  1505. {
  1506. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1507. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  1508. int data_byte, rx_byte, dlen, end;
  1509. int short_response, diff, pkt_size, ret = 0;
  1510. char cmd;
  1511. int rlen = msg->rx_len;
  1512. u8 *buf;
  1513. if (rlen <= 2) {
  1514. short_response = 1;
  1515. pkt_size = rlen;
  1516. rx_byte = 4;
  1517. } else {
  1518. short_response = 0;
  1519. data_byte = 10; /* first read */
  1520. if (rlen < data_byte)
  1521. pkt_size = rlen;
  1522. else
  1523. pkt_size = data_byte;
  1524. rx_byte = data_byte + 6; /* 4 header + 2 crc */
  1525. }
  1526. buf = msm_host->rx_buf;
  1527. end = 0;
  1528. while (!end) {
  1529. u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
  1530. struct mipi_dsi_msg max_pkt_size_msg = {
  1531. .channel = msg->channel,
  1532. .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
  1533. .tx_len = 2,
  1534. .tx_buf = tx,
  1535. };
  1536. DBG("rlen=%d pkt_size=%d rx_byte=%d",
  1537. rlen, pkt_size, rx_byte);
  1538. ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
  1539. if (ret < 2) {
  1540. pr_err("%s: Set max pkt size failed, %d\n",
  1541. __func__, ret);
  1542. return -EINVAL;
  1543. }
  1544. if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
  1545. (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
  1546. /* Clear the RDBK_DATA registers */
  1547. dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
  1548. DSI_RDBK_DATA_CTRL_CLR);
  1549. wmb(); /* make sure the RDBK registers are cleared */
  1550. dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
  1551. wmb(); /* release cleared status before transfer */
  1552. }
  1553. ret = dsi_cmds2buf_tx(msm_host, msg);
  1554. if (ret < msg->tx_len) {
  1555. pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
  1556. return ret;
  1557. }
  1558. /*
  1559. * once cmd_dma_done interrupt received,
  1560. * return data from client is ready and stored
  1561. * at RDBK_DATA register already
  1562. * since rx fifo is 16 bytes, dcs header is kept at first loop,
  1563. * after that dcs header lost during shift into registers
  1564. */
  1565. dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
  1566. if (dlen <= 0)
  1567. return 0;
  1568. if (short_response)
  1569. break;
  1570. if (rlen <= data_byte) {
  1571. diff = data_byte - rlen;
  1572. end = 1;
  1573. } else {
  1574. diff = 0;
  1575. rlen -= data_byte;
  1576. }
  1577. if (!end) {
  1578. dlen -= 2; /* 2 crc */
  1579. dlen -= diff;
  1580. buf += dlen; /* next start position */
  1581. data_byte = 14; /* NOT first read */
  1582. if (rlen < data_byte)
  1583. pkt_size += rlen;
  1584. else
  1585. pkt_size += data_byte;
  1586. DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
  1587. }
  1588. }
  1589. /*
  1590. * For single Long read, if the requested rlen < 10,
  1591. * we need to shift the start position of rx
  1592. * data buffer to skip the bytes which are not
  1593. * updated.
  1594. */
  1595. if (pkt_size < 10 && !short_response)
  1596. buf = msm_host->rx_buf + (10 - rlen);
  1597. else
  1598. buf = msm_host->rx_buf;
  1599. cmd = buf[0];
  1600. switch (cmd) {
  1601. case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
  1602. pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
  1603. ret = 0;
  1604. break;
  1605. case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
  1606. case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
  1607. ret = dsi_short_read1_resp(buf, msg);
  1608. break;
  1609. case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
  1610. case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
  1611. ret = dsi_short_read2_resp(buf, msg);
  1612. break;
  1613. case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
  1614. case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
  1615. ret = dsi_long_read_resp(buf, msg);
  1616. break;
  1617. default:
  1618. pr_warn("%s:Invalid response cmd\n", __func__);
  1619. ret = 0;
  1620. }
  1621. return ret;
  1622. }
/*
 * msm_dsi_host_cmd_xfer_commit() - kick off a queued command DMA.
 * @host:     DSI host
 * @dma_base: bus address of the command buffer
 * @len:      length of the command buffer in bytes
 *
 * Programs the DMA base and length registers, then writes the trigger
 * register; the final wmb() ensures the trigger write is posted to the
 * hardware before this function returns.
 */
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
				u32 len)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);

	/* Make sure trigger happens */
	wmb();
}
  1633. int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
  1634. struct msm_dsi_pll *src_pll)
  1635. {
  1636. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1637. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  1638. struct clk *byte_clk_provider, *pixel_clk_provider;
  1639. int ret;
  1640. ret = msm_dsi_pll_get_clk_provider(src_pll,
  1641. &byte_clk_provider, &pixel_clk_provider);
  1642. if (ret) {
  1643. pr_info("%s: can't get provider from pll, don't set parent\n",
  1644. __func__);
  1645. return 0;
  1646. }
  1647. ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
  1648. if (ret) {
  1649. pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
  1650. __func__, ret);
  1651. goto exit;
  1652. }
  1653. ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
  1654. if (ret) {
  1655. pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
  1656. __func__, ret);
  1657. goto exit;
  1658. }
  1659. if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
  1660. ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
  1661. if (ret) {
  1662. pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
  1663. __func__, ret);
  1664. goto exit;
  1665. }
  1666. ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
  1667. if (ret) {
  1668. pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
  1669. __func__, ret);
  1670. goto exit;
  1671. }
  1672. }
  1673. exit:
  1674. return ret;
  1675. }
  1676. int msm_dsi_host_enable(struct mipi_dsi_host *host)
  1677. {
  1678. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1679. dsi_op_mode_config(msm_host,
  1680. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
  1681. /* TODO: clock should be turned off for command mode,
  1682. * and only turned on before MDP START.
  1683. * This part of code should be enabled once mdp driver support it.
  1684. */
  1685. /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
  1686. dsi_clk_ctrl(msm_host, 0); */
  1687. return 0;
  1688. }
  1689. int msm_dsi_host_disable(struct mipi_dsi_host *host)
  1690. {
  1691. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1692. dsi_op_mode_config(msm_host,
  1693. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
  1694. /* Since we have disabled INTF, the video engine won't stop so that
  1695. * the cmd engine will be blocked.
  1696. * Reset to disable video engine so that we can send off cmd.
  1697. */
  1698. dsi_sw_reset(msm_host);
  1699. return 0;
  1700. }
  1701. static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
  1702. {
  1703. enum sfpb_ahb_arb_master_port_en en;
  1704. if (!msm_host->sfpb)
  1705. return;
  1706. en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
  1707. regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
  1708. SFPB_GPREG_MASTER_PORT_EN__MASK,
  1709. SFPB_GPREG_MASTER_PORT_EN(en));
  1710. }
  1711. int msm_dsi_host_power_on(struct mipi_dsi_host *host)
  1712. {
  1713. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1714. u32 clk_pre = 0, clk_post = 0;
  1715. int ret = 0;
  1716. mutex_lock(&msm_host->dev_mutex);
  1717. if (msm_host->power_on) {
  1718. DBG("dsi host already on");
  1719. goto unlock_ret;
  1720. }
  1721. msm_dsi_sfpb_config(msm_host, true);
  1722. ret = dsi_calc_clk_rate(msm_host);
  1723. if (ret) {
  1724. pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
  1725. goto unlock_ret;
  1726. }
  1727. ret = dsi_host_regulator_enable(msm_host);
  1728. if (ret) {
  1729. pr_err("%s:Failed to enable vregs.ret=%d\n",
  1730. __func__, ret);
  1731. goto unlock_ret;
  1732. }
  1733. ret = dsi_bus_clk_enable(msm_host);
  1734. if (ret) {
  1735. pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
  1736. goto fail_disable_reg;
  1737. }
  1738. dsi_phy_sw_reset(msm_host);
  1739. ret = msm_dsi_manager_phy_enable(msm_host->id,
  1740. msm_host->byte_clk_rate * 8,
  1741. msm_host->esc_clk_rate,
  1742. &clk_pre, &clk_post);
  1743. dsi_bus_clk_disable(msm_host);
  1744. if (ret) {
  1745. pr_err("%s: failed to enable phy, %d\n", __func__, ret);
  1746. goto fail_disable_reg;
  1747. }
  1748. ret = dsi_clk_ctrl(msm_host, 1);
  1749. if (ret) {
  1750. pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
  1751. goto fail_disable_reg;
  1752. }
  1753. ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
  1754. if (ret) {
  1755. pr_err("%s: failed to set pinctrl default state, %d\n",
  1756. __func__, ret);
  1757. goto fail_disable_clk;
  1758. }
  1759. dsi_timing_setup(msm_host);
  1760. dsi_sw_reset(msm_host);
  1761. dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
  1762. if (msm_host->disp_en_gpio)
  1763. gpiod_set_value(msm_host->disp_en_gpio, 1);
  1764. msm_host->power_on = true;
  1765. mutex_unlock(&msm_host->dev_mutex);
  1766. return 0;
  1767. fail_disable_clk:
  1768. dsi_clk_ctrl(msm_host, 0);
  1769. fail_disable_reg:
  1770. dsi_host_regulator_disable(msm_host);
  1771. unlock_ret:
  1772. mutex_unlock(&msm_host->dev_mutex);
  1773. return ret;
  1774. }
/*
 * msm_dsi_host_power_off() - tear down the controller, PHY, clocks and
 * regulators in the reverse order of msm_dsi_host_power_on().
 * @host: DSI host
 *
 * Idempotent: returns immediately if the host is already off.
 *
 * Return: always 0.
 */
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	mutex_lock(&msm_host->dev_mutex);
	if (!msm_host->power_on) {
		DBG("dsi host already off");
		goto unlock_ret;
	}

	dsi_ctrl_config(msm_host, false, 0, 0);

	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 0);

	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);

	msm_dsi_manager_phy_disable(msm_host->id);

	dsi_clk_ctrl(msm_host, 0);

	dsi_host_regulator_disable(msm_host);

	msm_dsi_sfpb_config(msm_host, false);

	DBG("-");

	msm_host->power_on = false;

unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return 0;
}
  1797. int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
  1798. struct drm_display_mode *mode)
  1799. {
  1800. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1801. if (msm_host->mode) {
  1802. drm_mode_destroy(msm_host->dev, msm_host->mode);
  1803. msm_host->mode = NULL;
  1804. }
  1805. msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
  1806. if (IS_ERR(msm_host->mode)) {
  1807. pr_err("%s: cannot duplicate mode\n", __func__);
  1808. return PTR_ERR(msm_host->mode);
  1809. }
  1810. return 0;
  1811. }
  1812. struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
  1813. unsigned long *panel_flags)
  1814. {
  1815. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1816. struct drm_panel *panel;
  1817. panel = of_drm_find_panel(msm_host->device_node);
  1818. if (panel_flags)
  1819. *panel_flags = msm_host->mode_flags;
  1820. return panel;
  1821. }
  1822. struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
  1823. {
  1824. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1825. return of_drm_find_bridge(msm_host->device_node);
  1826. }