cdns-mhdp.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Cadence MHDP DP bridge driver.
  4. *
  5. * Copyright: 2018 Cadence Design Systems, Inc.
  6. *
  7. * Author: Quentin Schulz <quentin.schulz@free-electrons.com>
  8. */
  9. #include <linux/clk.h>
  10. #include <linux/delay.h>
  11. #include <linux/err.h>
  12. #include <linux/firmware.h>
  13. #include <linux/io.h>
  14. #include <linux/iopoll.h>
  15. #include <linux/module.h>
  16. #include <linux/of.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/slab.h>
  19. #include <linux/phy/phy.h>
  20. #include <linux/phy/phy-dp.h>
  21. #include <drm/drm_atomic_helper.h>
  22. #include <drm/drm_bridge.h>
  23. #include <drm/drm_connector.h>
  24. #include <drm/drm_crtc_helper.h>
  25. #include <drm/drm_dp_helper.h>
  26. #include <drm/drm_modeset_helper_vtables.h>
  27. #include <drm/drm_print.h>
  28. #include <drm/drm_crtc_helper.h>
  29. #include <linux/irq.h>
  30. #include <linux/of_irq.h>
  31. #include <asm/unaligned.h>
  32. #include "cdns-mhdp.h"
  33. #include "cdns-mhdp-j721e.h"
/* CDNS MHDP Helpers */

/* Mailbox register polling: retry interval and timeout, in microseconds. */
#define MAILBOX_RETRY_US 1000
#define MAILBOX_TIMEOUT_US 5000000

/* mailbox */
/* Byte positions within the 4-byte mailbox message header (+ payload start). */
#define MB_OPCODE_ID 0
#define MB_MODULE_ID 1
#define MB_SIZE_MSB_ID 2
#define MB_SIZE_LSB_ID 3
#define MB_DATA_ID 4

/* Firmware module IDs carried in the mailbox header byte MB_MODULE_ID. */
#define MB_MODULE_ID_DP_TX 0x01
#define MB_MODULE_ID_HDCP_TX 0x07
#define MB_MODULE_ID_HDCP_RX 0x08
#define MB_MODULE_ID_HDCP_GENERAL 0x09
#define MB_MODULE_ID_GENERAL 0x0a

/* general opcode */
#define GENERAL_MAIN_CONTROL 0x01
#define GENERAL_TEST_ECHO 0x02
#define GENERAL_BUS_SETTINGS 0x03
#define GENERAL_TEST_ACCESS 0x04
#define GENERAL_REGISTER_READ 0x07

/* Opcodes understood by the MB_MODULE_ID_DP_TX firmware module. */
#define DPTX_SET_POWER_MNG 0x00
#define DPTX_SET_HOST_CAPABILITIES 0x01
#define DPTX_GET_EDID 0x02
#define DPTX_READ_DPCD 0x03
#define DPTX_WRITE_DPCD 0x04
#define DPTX_ENABLE_EVENT 0x05
#define DPTX_WRITE_REGISTER 0x06
#define DPTX_READ_REGISTER 0x07
#define DPTX_WRITE_FIELD 0x08
#define DPTX_TRAINING_CONTROL 0x09
#define DPTX_READ_EVENT 0x0a
#define DPTX_READ_LINK_STAT 0x0b
#define DPTX_SET_VIDEO 0x0c
#define DPTX_SET_AUDIO 0x0d
#define DPTX_GET_LAST_AUX_STAUS 0x0e
#define DPTX_SET_LINK_BREAK_POINT 0x0f
#define DPTX_FORCE_LANES 0x10
#define DPTX_HPD_STATE 0x11
#define DPTX_ADJUST_LT 0x12

/* GENERAL_MAIN_CONTROL payload: requested firmware run state. */
#define FW_STANDBY 0
#define FW_ACTIVE 1
  75. static inline u32 get_unaligned_be24(const void *p)
  76. {
  77. const u8 *_p = p;
  78. return _p[0] << 16 | _p[1] << 8 | _p[2];
  79. }
  80. static inline void put_unaligned_be24(u32 val, void *p)
  81. {
  82. u8 *_p = p;
  83. _p[0] = val >> 16;
  84. _p[1] = val >> 8;
  85. _p[2] = val;
  86. }
  87. static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
  88. {
  89. int val, ret;
  90. ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
  91. val, !val, MAILBOX_RETRY_US,
  92. MAILBOX_TIMEOUT_US);
  93. if (ret < 0)
  94. return ret;
  95. return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
  96. }
  97. static int cdp_dp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
  98. {
  99. int ret, full;
  100. ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
  101. full, !full, MAILBOX_RETRY_US,
  102. MAILBOX_TIMEOUT_US);
  103. if (ret < 0)
  104. return ret;
  105. writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
  106. return 0;
  107. }
  108. static int cdns_mhdp_mailbox_validate_receive(struct cdns_mhdp_device *mhdp,
  109. u8 module_id, u8 opcode,
  110. u16 req_size)
  111. {
  112. u32 mbox_size, i;
  113. u8 header[4];
  114. int ret;
  115. /* read the header of the message */
  116. for (i = 0; i < 4; i++) {
  117. ret = cdns_mhdp_mailbox_read(mhdp);
  118. if (ret < 0)
  119. return ret;
  120. header[i] = ret;
  121. }
  122. mbox_size = get_unaligned_be16(header + 2);
  123. if (opcode != header[0] || module_id != header[1] ||
  124. req_size != mbox_size) {
  125. /*
  126. * If the message in mailbox is not what we want, we need to
  127. * clear the mailbox by reading its contents.
  128. */
  129. for (i = 0; i < mbox_size; i++)
  130. if (cdns_mhdp_mailbox_read(mhdp) < 0)
  131. break;
  132. return -EINVAL;
  133. }
  134. return 0;
  135. }
  136. static int cdns_mhdp_mailbox_read_receive(struct cdns_mhdp_device *mhdp,
  137. u8 *buff, u16 buff_size)
  138. {
  139. u32 i;
  140. int ret;
  141. for (i = 0; i < buff_size; i++) {
  142. ret = cdns_mhdp_mailbox_read(mhdp);
  143. if (ret < 0)
  144. return ret;
  145. buff[i] = ret;
  146. }
  147. return 0;
  148. }
  149. static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
  150. u8 opcode, u16 size, u8 *message)
  151. {
  152. u8 header[4];
  153. int ret, i;
  154. header[0] = opcode;
  155. header[1] = module_id;
  156. put_unaligned_be16(size, header + 2);
  157. for (i = 0; i < 4; i++) {
  158. ret = cdp_dp_mailbox_write(mhdp, header[i]);
  159. if (ret)
  160. return ret;
  161. }
  162. for (i = 0; i < size; i++) {
  163. ret = cdp_dp_mailbox_write(mhdp, message[i]);
  164. if (ret)
  165. return ret;
  166. }
  167. return 0;
  168. }
  169. static
  170. int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
  171. {
  172. u8 msg[4], resp[8];
  173. int ret;
  174. if (addr == 0) {
  175. ret = -EINVAL;
  176. goto err_reg_read;
  177. }
  178. put_unaligned_be32(addr, msg);
  179. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
  180. GENERAL_REGISTER_READ,
  181. sizeof(msg), msg);
  182. if (ret)
  183. goto err_reg_read;
  184. ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_GENERAL,
  185. GENERAL_REGISTER_READ,
  186. sizeof(resp));
  187. if (ret)
  188. goto err_reg_read;
  189. ret = cdns_mhdp_mailbox_read_receive(mhdp, resp, sizeof(resp));
  190. if (ret)
  191. goto err_reg_read;
  192. /* Returned address value should be the same as requested */
  193. if (memcmp(msg, resp, sizeof(msg))) {
  194. ret = -EINVAL;
  195. goto err_reg_read;
  196. }
  197. *value = get_unaligned_be32(resp + 4);
  198. err_reg_read:
  199. if (ret) {
  200. DRM_DEV_ERROR(mhdp->dev, "Failed to read register.\n");
  201. *value = 0;
  202. }
  203. return ret;
  204. }
  205. static
  206. int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
  207. {
  208. u8 msg[6];
  209. put_unaligned_be16(addr, msg);
  210. put_unaligned_be32(val, msg + 2);
  211. return cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  212. DPTX_WRITE_REGISTER, sizeof(msg), msg);
  213. }
  214. static
  215. int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
  216. u8 start_bit, u8 bits_no, u32 val)
  217. {
  218. u8 field[8];
  219. put_unaligned_be16(addr, field);
  220. field[2] = start_bit;
  221. field[3] = bits_no;
  222. put_unaligned_be32(val, field + 4);
  223. return cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  224. DPTX_WRITE_FIELD, sizeof(field), field);
  225. }
  226. static
  227. int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
  228. u32 addr, u8 *data, u16 len)
  229. {
  230. u8 msg[5], reg[5];
  231. int ret;
  232. put_unaligned_be16(len, msg);
  233. put_unaligned_be24(addr, msg + 2);
  234. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  235. DPTX_READ_DPCD, sizeof(msg), msg);
  236. if (ret)
  237. goto err_dpcd_read;
  238. ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
  239. DPTX_READ_DPCD,
  240. sizeof(reg) + len);
  241. if (ret)
  242. goto err_dpcd_read;
  243. ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
  244. if (ret)
  245. goto err_dpcd_read;
  246. ret = cdns_mhdp_mailbox_read_receive(mhdp, data, len);
  247. err_dpcd_read:
  248. return ret;
  249. }
  250. static
  251. int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
  252. {
  253. u8 msg[6], reg[5];
  254. int ret;
  255. put_unaligned_be16(1, msg);
  256. put_unaligned_be24(addr, msg + 2);
  257. msg[5] = value;
  258. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  259. DPTX_WRITE_DPCD, sizeof(msg), msg);
  260. if (ret)
  261. goto err_dpcd_write;
  262. ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
  263. DPTX_WRITE_DPCD, sizeof(reg));
  264. if (ret)
  265. goto err_dpcd_write;
  266. ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
  267. if (ret)
  268. goto err_dpcd_write;
  269. if (addr != get_unaligned_be24(reg + 2))
  270. ret = -EINVAL;
  271. err_dpcd_write:
  272. if (ret)
  273. DRM_DEV_ERROR(mhdp->dev, "dpcd write failed: %d\n", ret);
  274. return ret;
  275. }
/*
 * Put the firmware in active or standby state (GENERAL_MAIN_CONTROL).
 *
 * The mailbox frame (4-byte header + 1-byte payload) is built and pushed
 * by hand rather than via cdns_mhdp_mailbox_send()/..._validate_receive(),
 * and the 5-byte response is read back into the same buffer.
 * Returns 0 on success or a negative errno on mailbox failure.
 */
static
int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
{
	u8 msg[5];
	int ret, i;

	/* Header: opcode, module ID, 16-bit size (1), then the state byte. */
	msg[0] = GENERAL_MAIN_CONTROL;
	msg[1] = MB_MODULE_ID_GENERAL;
	msg[2] = 0;
	msg[3] = 1;
	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;

	for (i = 0; i < sizeof(msg); i++) {
		ret = cdp_dp_mailbox_write(mhdp, msg[i]);
		if (ret)
			goto err_set_firmware_active;
	}

	/* read the firmware state */
	for (i = 0; i < sizeof(msg); i++) {
		ret = cdns_mhdp_mailbox_read(mhdp);
		if (ret < 0)
			goto err_set_firmware_active;

		msg[i] = ret;
	}

	/* Response content is not checked, only that the reads succeeded. */
	ret = 0;

err_set_firmware_active:
	if (ret < 0)
		DRM_DEV_ERROR(mhdp->dev, "set firmware active failed\n");
	return ret;
}
/*
 * Query the hot-plug-detect line state from the firmware (DPTX_HPD_STATE).
 * Returns the 1-byte HPD status (non-zero when a sink is connected) or a
 * negative errno on mailbox failure.
 */
static
int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
{
	u8 status;
	int ret;

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_HPD_STATE, 0, NULL);
	if (ret)
		goto err_get_hpd;

	ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
						 DPTX_HPD_STATE,
						 sizeof(status));
	if (ret)
		goto err_get_hpd;

	ret = cdns_mhdp_mailbox_read_receive(mhdp, &status, sizeof(status));
	if (ret)
		goto err_get_hpd;

	return status;

err_get_hpd:
	DRM_DEV_ERROR(mhdp->dev, "get hpd status failed: %d\n", ret);
	return ret;
}
/*
 * EDID block fetch callback for drm_do_get_edid().
 *
 * Asks the firmware (DPTX_GET_EDID) for EDID segment block/2, block
 * number block%2, retrying up to four times.  The 2-byte response echo
 * (reg[0] = length read, reg[1] = segment) is used to validate the
 * attempt.  Returns 0 on success, negative errno on mailbox failure.
 *
 * NOTE(review): if all four attempts complete without mailbox errors but
 * the echoed length/segment never match, the loop falls out with
 * ret == 0, i.e. success is reported with possibly bad EDID data; the
 * EDID checksum validation done by the DRM core is the only backstop —
 * confirm this is intended.
 */
static
int cdns_mhdp_get_edid_block(void *data, u8 *edid,
			     unsigned int block, size_t length)
{
	struct cdns_mhdp_device *mhdp = data;
	u8 msg[2], reg[2], i;
	int ret;

	for (i = 0; i < 4; i++) {
		/* EDID segment number and block within the segment. */
		msg[0] = block / 2;
		msg[1] = block % 2;

		ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
					     DPTX_GET_EDID, sizeof(msg), msg);
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_validate_receive(mhdp,
							 MB_MODULE_ID_DP_TX,
							 DPTX_GET_EDID,
							 sizeof(reg) + length);
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_read_receive(mhdp, edid, length);
		if (ret)
			continue;

		/* Accept only if the firmware echoed our length and segment. */
		if (reg[0] == length && reg[1] == block / 2)
			break;
	}

	if (ret)
		DRM_DEV_ERROR(mhdp->dev, "get block[%d] edid failed: %d\n",
			      block, ret);

	return ret;
}
/*
 * Read the pending firmware event byte (DPTX_READ_EVENT).
 * Returns the event value (>= 0) or a negative errno on mailbox failure.
 * Currently unreferenced in this file, hence __maybe_unused.
 */
static __maybe_unused
int cdns_mhdp_read_event(struct cdns_mhdp_device *mhdp)
{
	u8 event = 0;
	int ret;

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_READ_EVENT, 0, NULL);
	if (ret)
		return ret;

	ret = cdns_mhdp_mailbox_validate_receive(mhdp,
						 MB_MODULE_ID_DP_TX,
						 DPTX_READ_EVENT,
						 sizeof(event));
	if (ret < 0)
		return ret;

	ret = cdns_mhdp_mailbox_read_receive(mhdp, &event,
					     sizeof(event));
	if (ret < 0)
		return ret;

	return event;
}
  381. static
  382. int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp,
  383. u8 nlanes, u16 udelay, u8 *lanes_data, u8 *dpcd)
  384. {
  385. u8 payload[7];
  386. u8 hdr[5]; /* For DPCD read response header */
  387. u32 addr;
  388. u8 const nregs = 6; /* Registers 0x202-0x207 */
  389. int ret;
  390. if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
  391. DRM_DEV_ERROR(mhdp->dev, "invalid number of lanes: %d\n",
  392. nlanes);
  393. ret = -EINVAL;
  394. goto err_adjust_lt;
  395. }
  396. payload[0] = nlanes;
  397. put_unaligned_be16(udelay, payload + 1);
  398. memcpy(payload + 3, lanes_data, nlanes);
  399. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  400. DPTX_ADJUST_LT,
  401. sizeof(payload), payload);
  402. if (ret)
  403. goto err_adjust_lt;
  404. /* Yes, read the DPCD read command response */
  405. ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
  406. DPTX_READ_DPCD,
  407. sizeof(hdr) + nregs);
  408. if (ret)
  409. goto err_adjust_lt;
  410. ret = cdns_mhdp_mailbox_read_receive(mhdp, hdr, sizeof(hdr));
  411. if (ret)
  412. goto err_adjust_lt;
  413. addr = get_unaligned_be24(hdr + 2);
  414. if (addr != DP_LANE0_1_STATUS)
  415. goto err_adjust_lt;
  416. ret = cdns_mhdp_mailbox_read_receive(mhdp, dpcd, nregs);
  417. err_adjust_lt:
  418. if (ret)
  419. DRM_DEV_ERROR(mhdp->dev, "Failed to adjust Link Training.\n");
  420. return ret;
  421. }
/* EOF CDNS MHDP Helpers */

/* Firmware image loaded into the controller's instruction memory. */
#define FW_NAME "cadence/mhdp8546.bin"
/* Offset of the firmware instruction memory in the register space. */
#define CDNS_MHDP_IMEM 0x10000

/* TPS4 encoding for DP_TRAINING_PATTERN_SET (DP 1.4). */
#define CDNS_DP_TRAINING_PATTERN_4 0x7
/* Keep-alive timeout (presumably milliseconds — confirm at the use site). */
#define CDNS_KEEP_ALIVE_TIMEOUT 2000

/* Devices handled by this driver. */
static const struct of_device_id mhdp_ids[] = {
	{ .compatible = "cdns,mhdp8546", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mhdp_ids);

/* Host lane-count capability flags. */
#define CDNS_LANE_1 BIT(0)
#define CDNS_LANE_2 BIT(1)
#define CDNS_LANE_4 BIT(2)

/* Drive-level capability encoding: level in bits 1:0, force flag in bit 2. */
#define CDNS_VOLT_SWING(x) ((x) & GENMASK(1, 0))
#define CDNS_FORCE_VOLT_SWING BIT(2)
#define CDNS_PRE_EMPHASIS(x) ((x) & GENMASK(1, 0))
#define CDNS_FORCE_PRE_EMPHASIS BIT(2)

/* Training pattern set x maps to bit x-1 of the supported-pattern mask. */
#define CDNS_SUPPORT_TPS(x) BIT((x) - 1)
#define CDNS_FAST_LINK_TRAINING BIT(0)

/* Type-C lane remapping: 2 bits per logical lane. */
#define CDNS_LANE_MAPPING_TYPE_C_LANE_0(x) ((x) & GENMASK(1, 0))
#define CDNS_LANE_MAPPING_TYPE_C_LANE_1(x) ((x) & GENMASK(3, 2))
#define CDNS_LANE_MAPPING_TYPE_C_LANE_2(x) ((x) & GENMASK(5, 4))
#define CDNS_LANE_MAPPING_TYPE_C_LANE_3(x) ((x) & GENMASK(7, 6))
#define CDNS_LANE_MAPPING_NORMAL 0xe4
#define CDNS_LANE_MAPPING_FLIPPED 0x1b

#define CDNS_DP_MAX_NUM_LANES 4
#define CDNS_DP_TEST_VSC_SDP (1 << 6) /* 1.3+ */
#define CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY (1 << 7)
  450. static unsigned int max_link_rate(struct cdns_mhdp_host host,
  451. struct cdns_mhdp_sink sink)
  452. {
  453. return min(host.link_rate, sink.link_rate);
  454. }
  455. static u8 eq_training_pattern_supported(struct cdns_mhdp_host host,
  456. struct cdns_mhdp_sink sink)
  457. {
  458. return fls(host.pattern_supp & sink.pattern_supp);
  459. }
  460. static irqreturn_t mhdp_irq_handler(int irq, void *data)
  461. {
  462. struct cdns_mhdp_device *mhdp = (struct cdns_mhdp_device *)data;
  463. u32 mbox_stat, apb_stat, sw_ev0, sw_ev1, sw_ev2, sw_ev3;
  464. apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
  465. mbox_stat = readl(mhdp->regs + CDNS_MB_INT_STATUS);
  466. sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
  467. sw_ev1 = readl(mhdp->regs + CDNS_SW_EVENT1);
  468. sw_ev2 = readl(mhdp->regs + CDNS_SW_EVENT2);
  469. sw_ev3 = readl(mhdp->regs + CDNS_SW_EVENT3);
  470. //dev_dbg(mhdp->dev, "MHDP IRQ apb %x, mbox %x, sw_ev %x/%x/%x/%x\n", apb_stat, mbox_stat, sw_ev0, sw_ev1, sw_ev2, sw_ev3);
  471. if (sw_ev0 & CDNS_DPTX_HPD)
  472. drm_kms_helper_hotplug_event(mhdp->bridge.dev);
  473. return IRQ_HANDLED;
  474. }
  475. static ssize_t mhdp_transfer(struct drm_dp_aux *aux,
  476. struct drm_dp_aux_msg *msg)
  477. {
  478. struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
  479. int ret;
  480. if (msg->request != DP_AUX_NATIVE_WRITE &&
  481. msg->request != DP_AUX_NATIVE_READ)
  482. return -ENOTSUPP;
  483. if (msg->request == DP_AUX_NATIVE_WRITE) {
  484. const u8 *buf = msg->buffer;
  485. int i;
  486. for (i = 0; i < msg->size; ++i) {
  487. ret = cdns_mhdp_dpcd_write(mhdp,
  488. msg->address + i, buf[i]);
  489. if (!ret)
  490. continue;
  491. DRM_DEV_ERROR(mhdp->dev, "Failed to write DPCD\n");
  492. return ret;
  493. }
  494. } else {
  495. ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
  496. msg->buffer, msg->size);
  497. if (ret) {
  498. DRM_DEV_ERROR(mhdp->dev, "Failed to read DPCD\n");
  499. return ret;
  500. }
  501. }
  502. return msg->size;
  503. }
/*
 * Connector .get_modes hook: read the sink's EDID over the firmware
 * mailbox, publish it on the connector and build the mode list.
 * Returns the number of modes added (0 if the EDID read failed).
 */
static int cdns_mhdp_get_modes(struct drm_connector *connector)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
	struct edid *edid;
	int num_modes;

	edid = drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
	if (!edid) {
		DRM_DEV_ERROR(mhdp->dev, "Failed to read EDID\n");
		return 0;
	}

	drm_connector_update_edid_property(connector, edid);
	num_modes = drm_add_edid_modes(connector, edid);
	kfree(edid);

	/*
	 * HACK: Warn about unsupported display formats until we deal
	 * with them correctly.
	 */
	if (!(connector->display_info.color_formats &
	      mhdp->display_fmt.color_format))
		dev_warn(mhdp->dev,
			 "%s: No supported color_format found (0x%08x)\n",
			 __func__, connector->display_info.color_formats);

	if (connector->display_info.bpc < mhdp->display_fmt.bpc)
		dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
			 __func__, connector->display_info.bpc,
			 mhdp->display_fmt.bpc);

	return num_modes;
}
  532. static int cdns_mhdp_detect(struct drm_connector *conn,
  533. struct drm_modeset_acquire_ctx *ctx,
  534. bool force)
  535. {
  536. struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
  537. int ret;
  538. ret = cdns_mhdp_get_hpd_status(mhdp);
  539. if (ret > 0) {
  540. mhdp->plugged = true;
  541. return connector_status_connected;
  542. }
  543. if (ret < 0)
  544. dev_err(mhdp->dev, "Failed to obtain HPD state\n");
  545. mhdp->plugged = false;
  546. return connector_status_disconnected;
  547. }
/* Connector probing: HPD-based detect plus EDID-driven mode list. */
static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
	.detect_ctx = cdns_mhdp_detect,
	.get_modes = cdns_mhdp_get_modes,
};

/* Standard atomic connector ops; no driver-private connector state. */
static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.reset = drm_atomic_helper_connector_reset,
	.destroy = drm_connector_cleanup,
};
/*
 * Bridge .attach hook: create and wire up the DisplayPort connector,
 * set the input bus format/flags, and unmask the controller interrupts.
 * Returns 0 on success or a negative errno.
 */
static int cdns_mhdp_attach(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
	struct drm_connector *conn = &mhdp->connector;
	int ret;

	/* Sanity check: only our own embedded bridge is supported. */
	if (&mhdp->bridge != bridge)
		return -ENODEV;

	conn->polled = DRM_CONNECTOR_POLL_HPD;

	ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		dev_err(mhdp->dev, "failed to init connector\n");
		return ret;
	}

	drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);

	ret = drm_display_info_set_bus_formats(&conn->display_info,
					       &bus_format, 1);
	if (ret)
		return ret;

	conn->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH;
	/*
	 * HACK: DP is internal to J7 SoC and we need to use DRIVE_POSEDGE
	 * in the display controller. This is achieved for the time being
	 * by defining SAMPLE_NEGEDGE here.
	 */
	conn->display_info.bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
					DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;

	ret = drm_connector_attach_encoder(conn, bridge->encoder);
	if (ret) {
		dev_err(mhdp->dev, "failed to attach connector to encoder\n");
		return ret;
	}

	/* enable interrupts */
	/* Writing 0 unmasks everything (the masked variant is kept below). */
	//writel(~CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
	writel(0, mhdp->regs + CDNS_APB_INT_MASK);
	writel(0, mhdp->regs + CDNS_MB_INT_MASK);

	return 0;
}
/*
 * Prepare link training: disable any active sink pattern, reset the PHY
 * training configuration, program link parameters (enhanced framing,
 * enabled lanes), configure the PHY for the negotiated rate/lane count
 * with zeroed drive levels, and finally start TPS1 with scrambling
 * disabled on both ends.
 */
static void mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
{
	u32 reg32;
	u8 i;
	union phy_configure_opts phy_cfg;

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	/* Reset PHY configuration */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	/* Enhanced framing only when both host and sink support it. */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
			    mhdp->sink.enhanced & mhdp->host.enhanced);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
			    CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));

	drm_dp_link_configure(&mhdp->aux, &mhdp->link);

	/* PHY rate is in 100kHz units here (link.rate / 100). */
	phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
	phy_cfg.dp.lanes = (mhdp->link.num_lanes);
	/* Start from zero voltage swing and pre-emphasis on all lanes. */
	for (i = 0; i < 4; i++) {
		phy_cfg.dp.voltage[i] = 0;
		phy_cfg.dp.pre[i] = 0;
	}
	phy_cfg.dp.ssc = false;
	phy_cfg.dp.set_lanes = true;
	phy_cfg.dp.set_rate = true;
	phy_cfg.dp.set_voltages = true;
	phy_configure(mhdp->phy, &phy_cfg);

	/* Enable PHY training with TPS1, scrambler bypassed. */
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
			    CDNS_PHY_COMMON_CONFIG |
			    CDNS_PHY_TRAINING_EN |
			    CDNS_PHY_TRAINING_TYPE(1) |
			    CDNS_PHY_SCRAMBLER_BYPASS);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
}
/*
 * Translate the sink's adjust requests from @link_status into per-lane
 * drive settings, clamped to the host's capabilities.
 *
 * Outputs, for each active lane:
 *  - phy_cfg->dp.voltage[]/pre[]: levels for phy_configure()
 *  - lanes_data[]: the DP_TRAINING_LANEx_SET byte, including the
 *    MAX_SWING/MAX_PRE_EMPHASIS "reached" flags.
 */
static void mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
				  u8 link_status[DP_LINK_STATUS_SIZE],
				  u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
				  union phy_configure_opts *phy_cfg)
{
	unsigned int i;
	u8 adjust, max_pre_emphasis, max_volt_swing;
	u8 set_volt, set_pre;

	/* Host limits; pre-emphasis limit kept in its shifted position. */
	max_pre_emphasis = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
			   << DP_TRAIN_PRE_EMPHASIS_SHIFT;
	max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		/* Check if Voltage swing and pre-emphasis are within limits */
		adjust = drm_dp_get_adjust_request_voltage(link_status, i);
		set_volt = min_t(u8, adjust, max_volt_swing);

		adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		set_pre = min_t(u8, adjust, max_pre_emphasis) >> DP_TRAIN_PRE_EMPHASIS_SHIFT;

		/* Voltage swing level and pre-emphasis level combination is not allowed:
		 * leaving pre-emphasis as-is, and adjusting voltage swing.
		 */
		if (set_volt + set_pre > 3)
			set_volt = 3 - set_pre;

		phy_cfg->dp.voltage[i] = set_volt;
		lanes_data[i] = set_volt;

		if (set_volt == max_volt_swing)
			lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;

		phy_cfg->dp.pre[i] = set_pre;
		lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);

		if (set_pre == (max_pre_emphasis >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
			lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	}
}
  666. static void mhdp_set_adjust_request_voltage(
  667. u8 link_status[DP_LINK_STATUS_SIZE], int lane, u8 volt)
  668. {
  669. int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
  670. int s = ((lane & 1) ?
  671. DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
  672. DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
  673. int idx = i - DP_LANE0_1_STATUS;
  674. link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
  675. link_status[idx] |= volt << s;
  676. }
  677. static void mhdp_set_adjust_request_pre_emphasis(
  678. u8 link_status[DP_LINK_STATUS_SIZE], int lane, u8 pre_emphasis)
  679. {
  680. int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
  681. int s = ((lane & 1) ?
  682. DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
  683. DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
  684. int idx = i - DP_LANE0_1_STATUS;
  685. link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
  686. link_status[idx] |= pre_emphasis << s;
  687. }
  688. static void mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
  689. u8 link_status[DP_LINK_STATUS_SIZE])
  690. {
  691. unsigned int i;
  692. u8 volt, pre, max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing),
  693. max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
  694. for (i = 0; i < mhdp->link.num_lanes; i++) {
  695. volt = drm_dp_get_adjust_request_voltage(link_status, i);
  696. pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
  697. if (volt + pre > 3)
  698. mhdp_set_adjust_request_voltage(link_status, i,
  699. 3 - pre);
  700. if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
  701. mhdp_set_adjust_request_voltage(link_status, i,
  702. max_volt);
  703. if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
  704. mhdp_set_adjust_request_pre_emphasis(link_status, i,
  705. max_pre);
  706. }
  707. }
/*
 * Channel-equalization phase of link training.
 *
 * Enables TPS[@eq_tps] in the PHY and the sink (scrambling stays disabled
 * for TPS1-3), then loops: apply the requested per-lane drive settings,
 * run a firmware adjust cycle, and check the resulting lane status.
 * Gives up after 5 short failures or if clock recovery is lost.
 *
 * @training_interval: delay passed to cdns_mhdp_adjust_lt(), microseconds.
 * Returns true when channel EQ succeeded, false otherwise.
 */
static bool mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
					  u8 eq_tps,
					  unsigned int training_interval)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
	u8 dpcd[DP_LINK_STATUS_SIZE];
	u32 reg32;
	union phy_configure_opts phy_cfg;

	dev_dbg(mhdp->dev, "Starting EQ phase\n");

	/* Enable link training TPS[eq_tps] in PHY */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
		CDNS_PHY_TRAINING_TYPE(eq_tps);
	if (eq_tps != 4)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	/* TPS4 uses its own encoding and requires scrambling enabled. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
			   CDNS_DP_TRAINING_PATTERN_4);

	drm_dp_dpcd_read_link_status(&mhdp->aux, dpcd);

	do {
		/* Apply the drive levels the sink asked for (host-capped). */
		mhdp_get_adjust_train(mhdp, dpcd, lanes_data, &phy_cfg);
		phy_cfg.dp.lanes = (mhdp->link.num_lanes);
		phy_cfg.dp.ssc = false;
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		phy_configure(mhdp->phy, &phy_cfg);

		/* Firmware adjust cycle; refreshes dpcd[] with lane status. */
		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
				    training_interval, lanes_data, dpcd);

		/* Losing clock recovery during EQ is fatal for this rate. */
		if (!drm_dp_clock_recovery_ok(dpcd, mhdp->link.num_lanes))
			goto err;

		if (drm_dp_channel_eq_ok(dpcd, mhdp->link.num_lanes)) {
			dev_dbg(mhdp->dev, "EQ phase succeeded\n");
			return true;
		}

		fail_counter_short++;

		mhdp_adjust_requested_eq(mhdp, dpcd);
	} while (fail_counter_short < 5);

err:
	dev_dbg(mhdp->dev, "EQ phase failed for %d lanes and %d rate\n",
		mhdp->link.num_lanes, mhdp->link.rate);

	return false;
}
  751. static void mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
  752. u8 link_status[DP_LINK_STATUS_SIZE],
  753. u8 *req_volt, u8 *req_pre)
  754. {
  755. const u32 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing),
  756. max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
  757. unsigned int i;
  758. for (i = 0; i < mhdp->link.num_lanes; i++) {
  759. unsigned int val;
  760. val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
  761. max_volt : req_volt[i];
  762. mhdp_set_adjust_request_voltage(link_status, i, val);
  763. val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
  764. max_pre : req_pre[i];
  765. mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
  766. }
  767. }
/*
 * Inspect the post-iteration CR link status and decide how training
 * should proceed.
 *
 * Outputs:
 *  @cr_done            - all active lanes achieved clock recovery
 *  @same_before_adjust - some lane re-requested the settings already in
 *                        use, so adjusting again would change nothing
 *  @max_swing_reached  - a lane asked for combined swing+pre-emphasis
 *                        at/above the maximum while CR failed: abort
 *  @req_volt/@req_pre  - per-lane requested levels, clamped to host max
 *
 * NOTE(review): @before_cr is the lanes_data programmed last iteration —
 * presumably TRAINING_LANEx_SET-formatted bytes; confirm against
 * mhdp_get_adjust_train() (not visible in this chunk).
 */
static void mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
			     bool *same_before_adjust, bool *max_swing_reached,
			     u8 before_cr[DP_LINK_STATUS_SIZE],
			     u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
			     u8 *req_pre)
{
	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing),
		 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	bool same_pre, same_volt;
	unsigned int i;

	*same_before_adjust = false;
	*max_swing_reached = false;
	*cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		u8 tmp;

		/* Requested voltage swing, clamped to the host maximum */
		tmp = drm_dp_get_adjust_request_voltage(after_cr, i);
		req_volt[i] = min_t(u8, tmp, max_volt);

		/* Requested pre-emphasis, shifted down to a raw 0-3 level */
		tmp = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
		      DP_TRAIN_PRE_EMPHASIS_SHIFT;
		req_pre[i] = min_t(u8, tmp, max_pre);

		same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
			   req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
		same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
			    req_volt[i];
		if (same_pre && same_volt)
			*same_before_adjust = true;

		/* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
		if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
			*max_swing_reached = true;
			return;
		}
	}
}
/*
 * Run the clock-recovery (CR) phase of DP link training.
 *
 * Iterates PHY voltage/pre-emphasis adjustment until every active lane
 * reports CR_DONE, bailing out after 5 consecutive iterations with an
 * unchanged request ("short" counter), 10 total failed iterations
 * ("long" counter), or when a lane hits maximum swing without CR.
 * Returns true on success.
 */
static bool mhdp_link_training_clock_recovery(struct cdns_mhdp_device *mhdp)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
	   fail_counter_short = 0, fail_counter_cr_long = 0;
	u8 dpcd[DP_LINK_STATUS_SIZE];
	bool cr_done;
	union phy_configure_opts phy_cfg;

	dev_dbg(mhdp->dev, "Starting CR phase\n");

	mhdp_link_training_init(mhdp);

	drm_dp_dpcd_read_link_status(&mhdp->aux, dpcd);

	do {
		u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {},
		   requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
		bool same_before_adjust, max_swing_reached;

		/* Program the sink-requested drive levels into the PHY */
		mhdp_get_adjust_train(mhdp, dpcd, lanes_data, &phy_cfg);
		phy_cfg.dp.lanes = (mhdp->link.num_lanes);
		phy_cfg.dp.ssc = false;
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		phy_configure(mhdp->phy, &phy_cfg);

		/* 100 us interval for CR (cf. get_training_interval_us) */
		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
				    lanes_data, dpcd);

		mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
				 &max_swing_reached, lanes_data, dpcd,
				 requested_adjust_volt_swing,
				 requested_adjust_pre_emphasis);

		if (max_swing_reached) {
			dev_err(mhdp->dev, "CR: max swing reached\n");
			goto err;
		}

		if (cr_done) {
			dev_dbg(mhdp->dev, "CR phase succeeded\n");
			return true;
		}

		/* Not all CR_DONE bits set */
		fail_counter_cr_long++;

		if (same_before_adjust) {
			fail_counter_short++;
			continue;
		}

		/* Request changed: reset the "stuck" counter and re-adjust */
		fail_counter_short = 0;
		/*
		 * Voltage swing/pre-emphasis adjust requested
		 * during CR phase
		 */
		mhdp_adjust_requested_cr(mhdp, dpcd,
					 requested_adjust_volt_swing,
					 requested_adjust_pre_emphasis);
	} while (fail_counter_short < 5 && fail_counter_cr_long < 10);

err:
	dev_dbg(mhdp->dev, "CR phase failed for %d lanes and %d rate\n",
		mhdp->link.num_lanes, mhdp->link.rate);

	return false;
}
  856. static void lower_link_rate(struct drm_dp_link *link)
  857. {
  858. switch (drm_dp_link_rate_to_bw_code(link->rate)) {
  859. case DP_LINK_BW_2_7:
  860. link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
  861. break;
  862. case DP_LINK_BW_5_4:
  863. link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
  864. break;
  865. case DP_LINK_BW_8_1:
  866. link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
  867. break;
  868. }
  869. }
  870. static int mhdp_link_training(struct cdns_mhdp_device *mhdp,
  871. unsigned int training_interval)
  872. {
  873. u32 reg32;
  874. union phy_configure_opts phy_cfg;
  875. const u8 eq_tps = eq_training_pattern_supported(mhdp->host, mhdp->sink);
  876. while (1) {
  877. if (!mhdp_link_training_clock_recovery(mhdp)) {
  878. if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
  879. DP_LINK_BW_1_62) {
  880. dev_dbg(mhdp->dev,
  881. "Reducing link rate during CR phase\n");
  882. lower_link_rate(&mhdp->link);
  883. drm_dp_link_configure(&mhdp->aux, &mhdp->link);
  884. phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
  885. phy_cfg.dp.lanes = (mhdp->link.num_lanes);
  886. phy_cfg.dp.ssc = false;
  887. phy_cfg.dp.set_lanes = false;
  888. phy_cfg.dp.set_rate = true;
  889. phy_cfg.dp.set_voltages = false;
  890. phy_configure(mhdp->phy, &phy_cfg);
  891. continue;
  892. } else if (mhdp->link.num_lanes > 1) {
  893. dev_dbg(mhdp->dev,
  894. "Reducing lanes number during CR phase\n");
  895. mhdp->link.num_lanes >>= 1;
  896. mhdp->link.rate = max_link_rate(mhdp->host,
  897. mhdp->sink);
  898. drm_dp_link_configure(&mhdp->aux, &mhdp->link);
  899. phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
  900. phy_cfg.dp.lanes = (mhdp->link.num_lanes);
  901. phy_cfg.dp.ssc = false;
  902. phy_cfg.dp.set_lanes = true;
  903. phy_cfg.dp.set_rate = false;
  904. phy_cfg.dp.set_voltages = false;
  905. phy_configure(mhdp->phy, &phy_cfg);
  906. continue;
  907. }
  908. dev_dbg(mhdp->dev,
  909. "Link training failed during CR phase\n");
  910. goto err;
  911. }
  912. if (mhdp_link_training_channel_eq(mhdp, eq_tps,
  913. training_interval))
  914. break;
  915. if (mhdp->link.num_lanes > 1) {
  916. dev_dbg(mhdp->dev,
  917. "Reducing lanes number during EQ phase\n");
  918. mhdp->link.num_lanes >>= 1;
  919. drm_dp_link_configure(&mhdp->aux, &mhdp->link);
  920. phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
  921. phy_cfg.dp.lanes = (mhdp->link.num_lanes);
  922. phy_cfg.dp.ssc = false;
  923. phy_cfg.dp.set_lanes = true;
  924. phy_cfg.dp.set_rate = false;
  925. phy_cfg.dp.set_voltages = false;
  926. phy_configure(mhdp->phy, &phy_cfg);
  927. continue;
  928. } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
  929. DP_LINK_BW_1_62) {
  930. dev_dbg(mhdp->dev,
  931. "Reducing link rate during EQ phase\n");
  932. lower_link_rate(&mhdp->link);
  933. drm_dp_link_configure(&mhdp->aux, &mhdp->link);
  934. phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
  935. phy_cfg.dp.lanes = (mhdp->link.num_lanes);
  936. phy_cfg.dp.ssc = false;
  937. phy_cfg.dp.set_lanes = false;
  938. phy_cfg.dp.set_rate = true;
  939. phy_cfg.dp.set_voltages = false;
  940. phy_configure(mhdp->phy, &phy_cfg);
  941. continue;
  942. }
  943. dev_dbg(mhdp->dev, "Link training failed during EQ phase\n");
  944. goto err;
  945. }
  946. dev_dbg(mhdp->dev, "Link training successful\n");
  947. drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
  948. mhdp->host.scrambler ? 0 :
  949. DP_LINK_SCRAMBLING_DISABLE);
  950. cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
  951. reg32 &= ~GENMASK(1, 0);
  952. reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
  953. cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
  954. /* Reset PHY config */
  955. reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
  956. if (!mhdp->host.scrambler)
  957. reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
  958. cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
  959. return 0;
  960. err:
  961. /* Reset PHY config */
  962. reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
  963. if (!mhdp->host.scrambler)
  964. reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
  965. cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
  966. drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
  967. DP_TRAINING_PATTERN_DISABLE);
  968. return -EIO;
  969. }
  970. static void cdns_mhdp_disable(struct drm_bridge *bridge)
  971. {
  972. struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
  973. u32 resp;
  974. dev_dbg(mhdp->dev, "bridge disable\n");
  975. cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
  976. resp &= ~CDNS_DP_FRAMER_EN;
  977. resp |= CDNS_DP_NO_VIDEO_MODE;
  978. cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
  979. mhdp->link_up = false;
  980. if (mhdp->plugged)
  981. drm_dp_link_power_down(&mhdp->aux, &mhdp->link);
  982. /* Disable VIF clock for stream 0 */
  983. cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
  984. cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
  985. resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
  986. cdns_mhdp_j721e_disable(mhdp);
  987. }
  988. static u32 get_training_interval_us(struct cdns_mhdp_device *mhdp,
  989. u32 interval)
  990. {
  991. if (interval == 0)
  992. return 400;
  993. if (interval < 5)
  994. return 4000 << (interval - 1);
  995. dev_err(mhdp->dev,
  996. "wrong training interval returned by DPCD: %d\n", interval);
  997. return 0;
  998. }
  999. static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
  1000. {
  1001. u32 resp;
  1002. u8 reg0[DP_RECEIVER_CAP_SIZE], amp[2];
  1003. /*
  1004. * Upon power-on reset/device disconnection: [2:0] bits should be 0b001
  1005. * and [7:5] bits 0b000.
  1006. */
  1007. drm_dp_dpcd_writeb(&mhdp->aux, DP_SET_POWER, 1);
  1008. drm_dp_link_probe(&mhdp->aux, &mhdp->link);
  1009. dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
  1010. drm_dp_link_power_up(&mhdp->aux, &mhdp->link);
  1011. /* FIXME (CDNS): do we have to wait for 100ms before going on? */
  1012. mdelay(100);
  1013. mhdp->sink.link_rate = mhdp->link.rate;
  1014. mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
  1015. mhdp->sink.enhanced = !!(mhdp->link.capabilities &
  1016. DP_LINK_CAP_ENHANCED_FRAMING);
  1017. drm_dp_dpcd_read(&mhdp->aux, DP_DPCD_REV, reg0, DP_RECEIVER_CAP_SIZE);
  1018. mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
  1019. if (drm_dp_tps3_supported(reg0))
  1020. mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
  1021. if (drm_dp_tps4_supported(reg0))
  1022. mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
  1023. mhdp->sink.fast_link = !!(reg0[DP_MAX_DOWNSPREAD] &
  1024. DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
  1025. mhdp->link.rate = max_link_rate(mhdp->host, mhdp->sink);
  1026. mhdp->link.num_lanes = min_t(u8, mhdp->sink.lanes_cnt,
  1027. mhdp->host.lanes_cnt & GENMASK(2, 0));
  1028. /* Disable framer for link training */
  1029. cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
  1030. resp &= ~CDNS_DP_FRAMER_EN;
  1031. cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
  1032. /* Spread AMP if required, enable 8b/10b coding */
  1033. amp[0] = mhdp->host.ssc ? DP_SPREAD_AMP_0_5 : 0;
  1034. amp[1] = DP_SET_ANSI_8B10B;
  1035. drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
  1036. if (mhdp->host.fast_link & mhdp->sink.fast_link) {
  1037. /* FIXME: implement fastlink */
  1038. dev_err(mhdp->dev, "fastlink not supported\n");
  1039. return -ENOTSUPP;
  1040. } else {
  1041. const u32 interval = reg0[DP_TRAINING_AUX_RD_INTERVAL] &
  1042. DP_TRAINING_AUX_RD_MASK;
  1043. const u32 interval_us = get_training_interval_us(mhdp,
  1044. interval);
  1045. if (!interval_us ||
  1046. mhdp_link_training(mhdp, interval_us)) {
  1047. dev_err(mhdp->dev, "Link training failed. Exiting.\n");
  1048. return -EIO;
  1049. }
  1050. }
  1051. mhdp->link_up = true;
  1052. return 0;
  1053. }
  1054. u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
  1055. {
  1056. u32 bpp;
  1057. if (fmt->y_only)
  1058. return fmt->bpc;
  1059. switch (fmt->color_format) {
  1060. case DRM_COLOR_FORMAT_RGB444:
  1061. case DRM_COLOR_FORMAT_YCRCB444:
  1062. bpp = fmt->bpc * 3;
  1063. break;
  1064. case DRM_COLOR_FORMAT_YCRCB422:
  1065. bpp = fmt->bpc * 2;
  1066. break;
  1067. case DRM_COLOR_FORMAT_YCRCB420:
  1068. bpp = fmt->bpc * 3 / 2;
  1069. break;
  1070. default:
  1071. bpp = fmt->bpc * 3;
  1072. WARN_ON(1);
  1073. }
  1074. return bpp;
  1075. }
/*
 * Enable single-stream transport for the current mode: search for a
 * transfer-unit (TU) size whose valid-symbol count avoids fractional
 * corner cases, program the framer TU and line thresholds, then hand
 * off to cdns_mhdp_configure_video().
 * Returns 0 on success, -EINVAL if no usable TU layout exists.
 */
static int cdns_mhdp_sst_enable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	u32 rate, vs, vs_f, required_bandwidth, available_bandwidth;
	u32 tu_size = 30, line_thresh1, line_thresh2, line_thresh = 0;
	struct drm_display_mode *mode;
	int pxlclock;
	u32 bpp, bpc, pxlfmt;

	pxlfmt = mhdp->display_fmt.color_format;
	bpc = mhdp->display_fmt.bpc;

	mode = &bridge->encoder->crtc->state->mode;
	pxlclock = mode->crtc_clock;

	mhdp->stream_id = 0;

	/* link.rate is in tens of kb/s; rate ends up in tens of Mb/s */
	rate = mhdp->link.rate / 1000;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	/* find optimal tu_size */
	required_bandwidth = pxlclock * bpp / 8;
	available_bandwidth = mhdp->link.num_lanes * rate;
	do {
		/* tu_size starts at 32 (30 + first increment) */
		tu_size += 2;

		/* vs = valid symbols per TU; vs_f = fractional part / 1000 */
		vs_f = tu_size * required_bandwidth / available_bandwidth;
		vs = vs_f / 1000;
		vs_f = vs_f % 1000;
		/*
		 * FIXME (CDNS): downspreading?
		 * It's unused is what I've been told.
		 */
	} while ((vs == 1 || ((vs_f > 850 || vs_f < 100) && vs_f != 0) ||
		  tu_size - vs < 2) && tu_size < 64);

	if (vs > 64)
		return -EINVAL;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
			    CDNS_DP_FRAMER_TU_VS(vs) |
			    CDNS_DP_FRAMER_TU_SIZE(tu_size) |
			    CDNS_DP_FRAMER_TU_CNT_RST_EN);

	/* Line threshold in fixed point (<<5); +2 margin after rounding */
	line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
	line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
	line_thresh = line_thresh1 - line_thresh2 / mhdp->link.num_lanes;
	line_thresh = (line_thresh >> 5) + 2;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
			    line_thresh & GENMASK(5, 0));

	/* TU/VS difference, clamped to 0 when larger than 3 */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
			    CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
						   0 : tu_size - vs));

	cdns_mhdp_configure_video(bridge);

	return 0;
}
/*
 * Program the framer and MSA (Main Stream Attribute) registers for the
 * current CRTC mode on this device's stream, then enable the framer.
 * Mode comes from the encoder's CRTC state; pixel format/bpc from
 * mhdp->display_fmt.
 */
void cdns_mhdp_configure_video(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	unsigned int dp_framer_sp = 0, msa_horizontal_1,
		msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
		misc0 = 0, misc1 = 0, pxl_repr,
		front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
		dp_vertical_1;
	struct drm_display_mode *mode;
	u32 bpp, bpc, pxlfmt;
	u32 tmp;
	u8 stream_id = mhdp->stream_id;

	mode = &bridge->encoder->crtc->state->mode;

	pxlfmt = mhdp->display_fmt.color_format;
	bpc = mhdp->display_fmt.bpc;

	/* if YCBCR supported and stream not SD, use ITU709 */
	/* FIXME: handle ITU version with YCBCR420 when supported */
	if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 ||
	     pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && mode->crtc_vdisplay >= 720)
		misc0 = DP_YCBCR_COEFFICIENTS_ITU709;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	/* Map the DRM color format onto framer pixel format + MSA MISC0 */
	switch (pxlfmt) {
	case DRM_COLOR_FORMAT_RGB444:
		pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_RGB;
		break;
	case DRM_COLOR_FORMAT_YCRCB444:
		pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCRCB422:
		pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCRCB420:
		pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
		break;
	default:
		pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
	}

	/* Encode bits-per-component into MISC0 and the framer format */
	switch (bpc) {
	case 6:
		misc0 |= DP_TEST_BIT_DEPTH_6;
		pxl_repr |= CDNS_DP_FRAMER_6_BPC;
		break;
	case 8:
		misc0 |= DP_TEST_BIT_DEPTH_8;
		pxl_repr |= CDNS_DP_FRAMER_8_BPC;
		break;
	case 10:
		misc0 |= DP_TEST_BIT_DEPTH_10;
		pxl_repr |= CDNS_DP_FRAMER_10_BPC;
		break;
	case 12:
		misc0 |= DP_TEST_BIT_DEPTH_12;
		pxl_repr |= CDNS_DP_FRAMER_12_BPC;
		break;
	case 16:
		misc0 |= DP_TEST_BIT_DEPTH_16;
		pxl_repr |= CDNS_DP_FRAMER_16_BPC;
		break;
	}

	bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;
	cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
			    bnd_hsync2vsync);

	/* Sync polarities for the hsync2vsync block */
	hsync2vsync_pol_ctrl = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
			    hsync2vsync_pol_ctrl);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);

	/* Framer interlace / sync-polarity flags */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);

	/* Horizontal timing */
	front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
	back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
			    CDNS_DP_FRONT_PORCH(front_porch) |
			    CDNS_DP_BACK_PORCH(back_porch));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
			    mode->crtc_hdisplay * bpp / 8);

	/* MSA horizontal attributes */
	msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
			    CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
			    CDNS_DP_MSAH0_HSYNC_START(msa_h0));

	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
	msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
			   CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
			    msa_horizontal_1);

	/* MSA vertical attributes */
	msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
			    CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
			    CDNS_DP_MSAV0_VSYNC_START(msa_v0));

	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
	msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
			 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
			    msa_vertical_1);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		misc1 = DP_TEST_INTERLACED;
	if (mhdp->display_fmt.y_only)
		misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
	/* FIXME: use VSC SDP for Y420 */
	/* FIXME: (CDNS) no code for Y420 in bare metal test */
	/*
	 * NOTE(review): plain '=' discards any misc1 flags set above for
	 * the Y420 case — confirm that overwrite is intended.
	 */
	if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420)
		misc1 = CDNS_DP_TEST_VSC_SDP;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
			    misc0 | (misc1 << 8));

	/*
	 * NOTE(review): H_H_TOTAL is fed crtc_hdisplay, not crtc_htotal —
	 * confirm against the register definition.
	 */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
			    CDNS_DP_H_HSYNC_WIDTH(hsync) |
			    CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
			    CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
			    CDNS_DP_V0_VSTART(msa_v0));

	dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);

	/* Bit 2 of VB_ID flags interlaced scanning */
	cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
				(mode->flags & DRM_MODE_FLAG_INTERLACE) ?
				CDNS_DP_VB_ID_INTERLACED : 0);

	/* Finally enable the framer and leave "no video" mode */
	cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &tmp);
	tmp |= CDNS_DP_FRAMER_EN;
	tmp &= ~CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, tmp);
}
  1264. void cdns_mhdp_enable(struct drm_bridge *bridge)
  1265. {
  1266. struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
  1267. u32 resp;
  1268. dev_dbg(mhdp->dev, "bridge enable\n");
  1269. cdns_mhdp_j721e_enable(mhdp);
  1270. /* Enable VIF clock for stream 0 */
  1271. cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
  1272. cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
  1273. resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
  1274. if (!mhdp->link_up)
  1275. cdns_mhdp_link_up(mhdp);
  1276. cdns_mhdp_sst_enable(bridge);
  1277. }
/* Bridge detach callback: mask all APB and mailbox interrupts. */
static void cdns_mhdp_detach(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
	writel(~0, mhdp->regs + CDNS_MB_INT_MASK);
}
/* DRM bridge callbacks for the MHDP DisplayPort bridge */
static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
	.enable = cdns_mhdp_enable,
	.disable = cdns_mhdp_disable,
	.attach = cdns_mhdp_attach,
	.detach = cdns_mhdp_detach,
};
  1290. static int load_firmware(struct cdns_mhdp_device *mhdp, const char *name,
  1291. unsigned int addr)
  1292. {
  1293. const struct firmware *fw;
  1294. int ret;
  1295. ret = request_firmware(&fw, name, mhdp->dev);
  1296. if (ret) {
  1297. dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
  1298. name, ret);
  1299. return ret;
  1300. }
  1301. memcpy_toio(mhdp->regs + addr, fw->data, fw->size);
  1302. release_firmware(fw);
  1303. return 0;
  1304. }
  1305. static int mhdp_probe(struct platform_device *pdev)
  1306. {
  1307. struct resource *regs;
  1308. struct cdns_mhdp_device *mhdp;
  1309. struct clk *clk;
  1310. int ret;
  1311. unsigned int reg;
  1312. unsigned long rate;
  1313. int irq;
  1314. u32 lanes_prop;
  1315. mhdp = devm_kzalloc(&pdev->dev, sizeof(struct cdns_mhdp_device),
  1316. GFP_KERNEL);
  1317. if (!mhdp)
  1318. return -ENOMEM;
  1319. clk = devm_clk_get(&pdev->dev, NULL);
  1320. if (IS_ERR(clk)) {
  1321. dev_err(&pdev->dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
  1322. return PTR_ERR(clk);
  1323. }
  1324. mhdp->clk = clk;
  1325. mhdp->dev = &pdev->dev;
  1326. dev_set_drvdata(&pdev->dev, mhdp);
  1327. drm_dp_aux_init(&mhdp->aux);
  1328. mhdp->aux.dev = &pdev->dev;
  1329. mhdp->aux.transfer = mhdp_transfer;
  1330. regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1331. mhdp->regs = devm_ioremap_resource(&pdev->dev, regs);
  1332. if (IS_ERR(mhdp->regs))
  1333. return PTR_ERR(mhdp->regs);
  1334. mhdp->phy = devm_phy_get(&pdev->dev, "dpphy");
  1335. if (IS_ERR(mhdp->phy)) {
  1336. dev_err(&pdev->dev, "no PHY configured\n");
  1337. return PTR_ERR(mhdp->phy);
  1338. }
  1339. platform_set_drvdata(pdev, mhdp);
  1340. clk_prepare_enable(clk);
  1341. pm_runtime_enable(&pdev->dev);
  1342. ret = pm_runtime_get_sync(&pdev->dev);
  1343. if (ret < 0) {
  1344. dev_err(&pdev->dev, "pm_runtime_get_sync failed\n");
  1345. pm_runtime_disable(&pdev->dev);
  1346. return ret;
  1347. }
  1348. ret = cdns_mhdp_j721e_init(mhdp);
  1349. if (ret != 0) {
  1350. dev_err(&pdev->dev, "J721E Wrapper initialization failed: %d\n",
  1351. ret);
  1352. goto runtime_put;
  1353. }
  1354. /* Release uCPU reset and stall it. */
  1355. writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);
  1356. ret = load_firmware(mhdp, FW_NAME, CDNS_MHDP_IMEM);
  1357. if (ret)
  1358. return ret;
  1359. rate = clk_get_rate(clk);
  1360. writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
  1361. writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
  1362. dev_dbg(&pdev->dev, "func clk rate %lu Hz\n", rate);
  1363. /* Leave debug mode, release stall */
  1364. writel(0, mhdp->regs + CDNS_APB_CTRL);
  1365. writel(~0, mhdp->regs + CDNS_MB_INT_MASK);
  1366. writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
  1367. irq = platform_get_irq(pdev, 0);
  1368. ret = devm_request_threaded_irq(mhdp->dev, irq, NULL, mhdp_irq_handler,
  1369. IRQF_ONESHOT, "mhdp8546", mhdp);
  1370. if (ret) {
  1371. dev_err(&pdev->dev,
  1372. "cannot install IRQ %d\n", irq);
  1373. ret = -EIO;
  1374. goto runtime_put;
  1375. }
  1376. /*
  1377. * Wait for the KEEP_ALIVE "message" on the first 8 bits.
  1378. * Updated each sched "tick" (~2ms)
  1379. */
  1380. ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
  1381. reg & CDNS_KEEP_ALIVE_MASK, 500,
  1382. CDNS_KEEP_ALIVE_TIMEOUT);
  1383. if (ret) {
  1384. dev_err(&pdev->dev,
  1385. "device didn't give any life sign: reg %d\n", reg);
  1386. return -EIO;
  1387. }
  1388. /* Read source capabilities, based on PHY's device tree properties. */
  1389. ret = device_property_read_u32(&(mhdp->phy->dev), "num_lanes",
  1390. &(lanes_prop));
  1391. if (ret)
  1392. mhdp->host.lanes_cnt = CDNS_LANE_4;
  1393. else
  1394. mhdp->host.lanes_cnt = lanes_prop;
  1395. ret = device_property_read_u32(&(mhdp->phy->dev), "max_bit_rate",
  1396. &(mhdp->host.link_rate));
  1397. if (ret)
  1398. mhdp->host.link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
  1399. else
  1400. /* PHY uses Mb/s, DRM uses tens of kb/s. */
  1401. mhdp->host.link_rate *= 100;
  1402. mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
  1403. mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
  1404. mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
  1405. CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
  1406. CDNS_SUPPORT_TPS(4);
  1407. mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
  1408. mhdp->host.fast_link = false;
  1409. mhdp->host.enhanced = true;
  1410. mhdp->host.scrambler = true;
  1411. mhdp->host.ssc = false;
  1412. /* The only currently supported format */
  1413. mhdp->display_fmt.y_only = false;
  1414. mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
  1415. mhdp->display_fmt.bpc = 8;
  1416. mhdp->bridge.of_node = pdev->dev.of_node;
  1417. mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
  1418. /* Init events to 0 as it's not cleared by FW at boot but on read */
  1419. readl(mhdp->regs + CDNS_SW_EVENT0);
  1420. readl(mhdp->regs + CDNS_SW_EVENT1);
  1421. readl(mhdp->regs + CDNS_SW_EVENT2);
  1422. readl(mhdp->regs + CDNS_SW_EVENT3);
  1423. /* Activate uCPU */
  1424. ret = cdns_mhdp_set_firmware_active(mhdp, true);
  1425. if (ret) {
  1426. dev_err(mhdp->dev, "Failed to activate DP\n");
  1427. return ret;
  1428. }
  1429. ret = phy_init(mhdp->phy);
  1430. if (ret) {
  1431. dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
  1432. goto runtime_put;
  1433. }
  1434. drm_bridge_add(&mhdp->bridge);
  1435. return 0;
  1436. runtime_put:
  1437. pm_runtime_put_sync(&pdev->dev);
  1438. pm_runtime_disable(&pdev->dev);
  1439. return ret;
  1440. }
  1441. MODULE_FIRMWARE(FW_NAME);
  1442. static int mhdp_remove(struct platform_device *pdev)
  1443. {
  1444. struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev);
  1445. int ret;
  1446. drm_bridge_remove(&mhdp->bridge);
  1447. ret = cdns_mhdp_set_firmware_active(mhdp, false);
  1448. if (ret) {
  1449. dev_err(mhdp->dev, "Failed to de-activate DP\n");
  1450. return ret;
  1451. }
  1452. phy_exit(mhdp->phy);
  1453. pm_runtime_put_sync(&pdev->dev);
  1454. pm_runtime_disable(&pdev->dev);
  1455. clk_disable_unprepare(mhdp->clk);
  1456. /* FIXME: check for missing functions */
  1457. return 0;
  1458. }
/* Platform driver glue; the OF match table (mhdp_ids) is defined above. */
static struct platform_driver mhdp_driver = {
	.driver = {
		.name = "cdns-mhdp",
		.of_match_table = of_match_ptr(mhdp_ids),
	},
	.probe = mhdp_probe,
	.remove = mhdp_remove,
};
  1467. module_platform_driver(mhdp_driver);
  1468. MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
  1469. MODULE_AUTHOR("Przemyslaw Gaj <pgaj@cadence.com>");
  1470. MODULE_AUTHOR("Damian Kos <dkos@cadence.com>");
  1471. MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
  1472. MODULE_DESCRIPTION("Cadence MHDP DP bridge driver");
  1473. MODULE_LICENSE("GPL");
  1474. MODULE_ALIAS("platform:cdns-mhdp");