cdns-mhdp.c 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Cadence MHDP DP bridge driver.
  4. *
  5. * Copyright: 2018 Cadence Design Systems, Inc.
  6. *
  7. * Author: Quentin Schulz <quentin.schulz@free-electrons.com>
  8. */
  9. #include <linux/clk.h>
  10. #include <linux/delay.h>
  11. #include <linux/err.h>
  12. #include <linux/firmware.h>
  13. #include <linux/io.h>
  14. #include <linux/iopoll.h>
  15. #include <linux/module.h>
  16. #include <linux/of.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/slab.h>
  19. #include <linux/phy/phy.h>
  20. #include <linux/phy/phy-dp.h>
  21. #include <drm/drm_atomic_helper.h>
  22. #include <drm/drm_bridge.h>
  23. #include <drm/drm_connector.h>
  24. #include <drm/drm_crtc_helper.h>
  25. #include <drm/drm_dp_helper.h>
  26. #include <drm/drm_modeset_helper_vtables.h>
  27. #include <drm/drm_print.h>
  28. #include <drm/drm_crtc_helper.h>
  29. #include <linux/irq.h>
  30. #include <linux/of_irq.h>
  31. #include <linux/of_device.h>
  32. #include <asm/unaligned.h>
  33. #include "cdns-mhdp.h"
  34. #include "cdns-mhdp-j721e.h"
  35. /* CDNS MHDP Helpers */
  36. #define MAILBOX_RETRY_US 1000
  37. #define MAILBOX_TIMEOUT_US 5000000
  38. /* mailbox */
  39. #define MB_OPCODE_ID 0
  40. #define MB_MODULE_ID 1
  41. #define MB_SIZE_MSB_ID 2
  42. #define MB_SIZE_LSB_ID 3
  43. #define MB_DATA_ID 4
  44. #define MB_MODULE_ID_DP_TX 0x01
  45. #define MB_MODULE_ID_HDCP_TX 0x07
  46. #define MB_MODULE_ID_HDCP_RX 0x08
  47. #define MB_MODULE_ID_HDCP_GENERAL 0x09
  48. #define MB_MODULE_ID_GENERAL 0x0a
  49. /* general opcode */
  50. #define GENERAL_MAIN_CONTROL 0x01
  51. #define GENERAL_TEST_ECHO 0x02
  52. #define GENERAL_BUS_SETTINGS 0x03
  53. #define GENERAL_TEST_ACCESS 0x04
  54. #define GENERAL_REGISTER_READ 0x07
  55. #define DPTX_SET_POWER_MNG 0x00
  56. #define DPTX_SET_HOST_CAPABILITIES 0x01
  57. #define DPTX_GET_EDID 0x02
  58. #define DPTX_READ_DPCD 0x03
  59. #define DPTX_WRITE_DPCD 0x04
  60. #define DPTX_ENABLE_EVENT 0x05
  61. #define DPTX_WRITE_REGISTER 0x06
  62. #define DPTX_READ_REGISTER 0x07
  63. #define DPTX_WRITE_FIELD 0x08
  64. #define DPTX_TRAINING_CONTROL 0x09
  65. #define DPTX_READ_EVENT 0x0a
  66. #define DPTX_READ_LINK_STAT 0x0b
  67. #define DPTX_SET_VIDEO 0x0c
  68. #define DPTX_SET_AUDIO 0x0d
  69. #define DPTX_GET_LAST_AUX_STAUS 0x0e
  70. #define DPTX_SET_LINK_BREAK_POINT 0x0f
  71. #define DPTX_FORCE_LANES 0x10
  72. #define DPTX_HPD_STATE 0x11
  73. #define DPTX_ADJUST_LT 0x12
  74. #define FW_STANDBY 0
  75. #define FW_ACTIVE 1
  76. #define DPTX_READ_EVENT_HPD_TO_HIGH BIT(0)
  77. #define DPTX_READ_EVENT_HPD_TO_LOW BIT(1)
  78. #define DPTX_READ_EVENT_HPD_PULSE BIT(2)
  79. #define DPTX_READ_EVENT_HPD_STATE BIT(3)
  80. static inline u32 get_unaligned_be24(const void *p)
  81. {
  82. const u8 *_p = p;
  83. return _p[0] << 16 | _p[1] << 8 | _p[2];
  84. }
  85. static inline void put_unaligned_be24(u32 val, void *p)
  86. {
  87. u8 *_p = p;
  88. _p[0] = val >> 16;
  89. _p[1] = val >> 8;
  90. _p[2] = val;
  91. }
/*
 * Read one byte from the firmware mailbox.
 *
 * Polls CDNS_MAILBOX_EMPTY until the mailbox has data (or the poll
 * times out), then returns the low byte of CDNS_MAILBOX_RX_DATA.
 * Returns the byte read (0-255) on success, negative errno on timeout.
 * Caller must hold mbox_mutex (asserted via WARN_ON).
 */
static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
{
	int val, ret;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
				 val, !val, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
}
/*
 * Write one byte to the firmware mailbox.
 *
 * Polls CDNS_MAILBOX_FULL until there is room (or the poll times out),
 * then writes @val to CDNS_MAILBOX_TX_DATA. Returns 0 on success,
 * negative errno on timeout. Caller must hold mbox_mutex.
 *
 * NOTE(review): the name breaks the cdns_mhdp_* convention used by all
 * sibling helpers; consider renaming to cdns_mhdp_mailbox_write (not
 * done here since callers may exist outside this chunk of the file).
 */
static int cdp_dp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
{
	int ret, full;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
				 full, !full, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);

	return 0;
}
  115. static int cdns_mhdp_mailbox_validate_receive(struct cdns_mhdp_device *mhdp,
  116. u8 module_id, u8 opcode,
  117. u16 req_size)
  118. {
  119. u32 mbox_size, i;
  120. u8 header[4];
  121. int ret;
  122. /* read the header of the message */
  123. for (i = 0; i < 4; i++) {
  124. ret = cdns_mhdp_mailbox_read(mhdp);
  125. if (ret < 0)
  126. return ret;
  127. header[i] = ret;
  128. }
  129. mbox_size = get_unaligned_be16(header + 2);
  130. if (opcode != header[0] || module_id != header[1] ||
  131. req_size != mbox_size) {
  132. /*
  133. * If the message in mailbox is not what we want, we need to
  134. * clear the mailbox by reading its contents.
  135. */
  136. for (i = 0; i < mbox_size; i++)
  137. if (cdns_mhdp_mailbox_read(mhdp) < 0)
  138. break;
  139. return -EINVAL;
  140. }
  141. return 0;
  142. }
  143. static int cdns_mhdp_mailbox_read_receive(struct cdns_mhdp_device *mhdp,
  144. u8 *buff, u16 buff_size)
  145. {
  146. u32 i;
  147. int ret;
  148. for (i = 0; i < buff_size; i++) {
  149. ret = cdns_mhdp_mailbox_read(mhdp);
  150. if (ret < 0)
  151. return ret;
  152. buff[i] = ret;
  153. }
  154. return 0;
  155. }
  156. static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
  157. u8 opcode, u16 size, u8 *message)
  158. {
  159. u8 header[4];
  160. int ret, i;
  161. header[0] = opcode;
  162. header[1] = module_id;
  163. put_unaligned_be16(size, header + 2);
  164. for (i = 0; i < 4; i++) {
  165. ret = cdp_dp_mailbox_write(mhdp, header[i]);
  166. if (ret)
  167. return ret;
  168. }
  169. for (i = 0; i < size; i++) {
  170. ret = cdp_dp_mailbox_write(mhdp, message[i]);
  171. if (ret)
  172. return ret;
  173. }
  174. return 0;
  175. }
  176. static
  177. int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
  178. {
  179. u8 msg[4], resp[8];
  180. int ret;
  181. if (addr == 0) {
  182. ret = -EINVAL;
  183. goto err_reg_read;
  184. }
  185. put_unaligned_be32(addr, msg);
  186. mutex_lock(&mhdp->mbox_mutex);
  187. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
  188. GENERAL_REGISTER_READ,
  189. sizeof(msg), msg);
  190. if (ret)
  191. goto err_reg_read;
  192. ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_GENERAL,
  193. GENERAL_REGISTER_READ,
  194. sizeof(resp));
  195. if (ret)
  196. goto err_reg_read;
  197. ret = cdns_mhdp_mailbox_read_receive(mhdp, resp, sizeof(resp));
  198. if (ret)
  199. goto err_reg_read;
  200. /* Returned address value should be the same as requested */
  201. if (memcmp(msg, resp, sizeof(msg))) {
  202. ret = -EINVAL;
  203. goto err_reg_read;
  204. }
  205. *value = get_unaligned_be32(resp + 4);
  206. err_reg_read:
  207. mutex_unlock(&mhdp->mbox_mutex);
  208. if (ret) {
  209. DRM_DEV_ERROR(mhdp->dev, "Failed to read register.\n");
  210. *value = 0;
  211. }
  212. return ret;
  213. }
  214. static
  215. int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
  216. {
  217. u8 msg[6];
  218. int ret;
  219. put_unaligned_be16(addr, msg);
  220. put_unaligned_be32(val, msg + 2);
  221. mutex_lock(&mhdp->mbox_mutex);
  222. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  223. DPTX_WRITE_REGISTER, sizeof(msg), msg);
  224. mutex_unlock(&mhdp->mbox_mutex);
  225. return ret;
  226. }
  227. static
  228. int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
  229. u8 start_bit, u8 bits_no, u32 val)
  230. {
  231. u8 field[8];
  232. int ret;
  233. put_unaligned_be16(addr, field);
  234. field[2] = start_bit;
  235. field[3] = bits_no;
  236. put_unaligned_be32(val, field + 4);
  237. mutex_lock(&mhdp->mbox_mutex);
  238. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  239. DPTX_WRITE_FIELD, sizeof(field), field);
  240. mutex_unlock(&mhdp->mbox_mutex);
  241. return ret;
  242. }
  243. static
  244. int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
  245. u32 addr, u8 *data, u16 len)
  246. {
  247. u8 msg[5], reg[5];
  248. int ret;
  249. put_unaligned_be16(len, msg);
  250. put_unaligned_be24(addr, msg + 2);
  251. mutex_lock(&mhdp->mbox_mutex);
  252. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  253. DPTX_READ_DPCD, sizeof(msg), msg);
  254. if (ret)
  255. goto err_dpcd_read;
  256. ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
  257. DPTX_READ_DPCD,
  258. sizeof(reg) + len);
  259. if (ret)
  260. goto err_dpcd_read;
  261. ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
  262. if (ret)
  263. goto err_dpcd_read;
  264. ret = cdns_mhdp_mailbox_read_receive(mhdp, data, len);
  265. err_dpcd_read:
  266. mutex_unlock(&mhdp->mbox_mutex);
  267. return ret;
  268. }
  269. static
  270. int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
  271. {
  272. u8 msg[6], reg[5];
  273. int ret;
  274. put_unaligned_be16(1, msg);
  275. put_unaligned_be24(addr, msg + 2);
  276. msg[5] = value;
  277. mutex_lock(&mhdp->mbox_mutex);
  278. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  279. DPTX_WRITE_DPCD, sizeof(msg), msg);
  280. if (ret)
  281. goto err_dpcd_write;
  282. ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
  283. DPTX_WRITE_DPCD, sizeof(reg));
  284. if (ret)
  285. goto err_dpcd_write;
  286. ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
  287. if (ret)
  288. goto err_dpcd_write;
  289. if (addr != get_unaligned_be24(reg + 2))
  290. ret = -EINVAL;
  291. err_dpcd_write:
  292. mutex_unlock(&mhdp->mbox_mutex);
  293. if (ret)
  294. DRM_DEV_ERROR(mhdp->dev, "dpcd write failed: %d\n", ret);
  295. return ret;
  296. }
  297. static
  298. int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
  299. {
  300. u8 msg[5];
  301. int ret, i;
  302. msg[0] = GENERAL_MAIN_CONTROL;
  303. msg[1] = MB_MODULE_ID_GENERAL;
  304. msg[2] = 0;
  305. msg[3] = 1;
  306. msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
  307. mutex_lock(&mhdp->mbox_mutex);
  308. for (i = 0; i < sizeof(msg); i++) {
  309. ret = cdp_dp_mailbox_write(mhdp, msg[i]);
  310. if (ret)
  311. goto err_set_firmware_active;
  312. }
  313. /* read the firmware state */
  314. for (i = 0; i < sizeof(msg); i++) {
  315. ret = cdns_mhdp_mailbox_read(mhdp);
  316. if (ret < 0)
  317. goto err_set_firmware_active;
  318. msg[i] = ret;
  319. }
  320. ret = 0;
  321. err_set_firmware_active:
  322. mutex_unlock(&mhdp->mbox_mutex);
  323. if (ret < 0)
  324. DRM_DEV_ERROR(mhdp->dev, "set firmware active failed\n");
  325. return ret;
  326. }
  327. static
  328. int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
  329. {
  330. u8 status;
  331. int ret;
  332. mutex_lock(&mhdp->mbox_mutex);
  333. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  334. DPTX_HPD_STATE, 0, NULL);
  335. if (ret)
  336. goto err_get_hpd;
  337. ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
  338. DPTX_HPD_STATE,
  339. sizeof(status));
  340. if (ret)
  341. goto err_get_hpd;
  342. ret = cdns_mhdp_mailbox_read_receive(mhdp, &status, sizeof(status));
  343. if (ret)
  344. goto err_get_hpd;
  345. mutex_unlock(&mhdp->mbox_mutex);
  346. return status;
  347. err_get_hpd:
  348. mutex_unlock(&mhdp->mbox_mutex);
  349. DRM_DEV_ERROR(mhdp->dev, "get hpd status failed: %d\n", ret);
  350. return ret;
  351. }
/*
 * EDID block read callback for drm_do_get_edid().
 *
 * Requests EDID block @block from the firmware and copies @length bytes
 * into @edid. The whole exchange is retried up to 4 times; a reply is
 * accepted once the returned length and segment number echo the
 * request. Returns 0 on success or a negative errno (logged).
 *
 * NOTE(review): if all four attempts complete the mailbox exchange but
 * fail the reg[0]/reg[1] consistency check, ret is still 0 and the
 * function reports success with whatever data the last read produced —
 * confirm whether that is intended.
 */
static
int cdns_mhdp_get_edid_block(void *data, u8 *edid,
			     unsigned int block, size_t length)
{
	struct cdns_mhdp_device *mhdp = data;
	u8 msg[2], reg[2], i;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);

	for (i = 0; i < 4; i++) {
		/* EDID segment number and block-within-segment. */
		msg[0] = block / 2;
		msg[1] = block % 2;

		ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
					     DPTX_GET_EDID, sizeof(msg), msg);
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_validate_receive(mhdp,
							 MB_MODULE_ID_DP_TX,
							 DPTX_GET_EDID,
							 sizeof(reg) + length);
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_read_receive(mhdp, edid, length);
		if (ret)
			continue;

		/* Accept only if the FW echoed our length and segment. */
		if (reg[0] == length && reg[1] == block / 2)
			break;
	}

	mutex_unlock(&mhdp->mbox_mutex);

	if (ret)
		DRM_DEV_ERROR(mhdp->dev, "get block[%d] edid failed: %d\n",
			      block, ret);

	return ret;
}
  388. static
  389. int cdns_mhdp_read_event(struct cdns_mhdp_device *mhdp)
  390. {
  391. u8 event = 0;
  392. int ret;
  393. mutex_lock(&mhdp->mbox_mutex);
  394. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  395. DPTX_READ_EVENT, 0, NULL);
  396. if (ret)
  397. goto out;
  398. ret = cdns_mhdp_mailbox_validate_receive(mhdp,
  399. MB_MODULE_ID_DP_TX,
  400. DPTX_READ_EVENT,
  401. sizeof(event));
  402. if (ret < 0)
  403. goto out;
  404. ret = cdns_mhdp_mailbox_read_receive(mhdp, &event,
  405. sizeof(event));
  406. out:
  407. mutex_unlock(&mhdp->mbox_mutex);
  408. if (ret < 0)
  409. return ret;
  410. return event;
  411. }
  412. static
  413. int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp,
  414. u8 nlanes, u16 udelay, u8 *lanes_data, u8 *link_status)
  415. {
  416. u8 payload[7];
  417. u8 hdr[5]; /* For DPCD read response header */
  418. u32 addr;
  419. u8 const nregs = 6; /* Registers 0x202-0x207 */
  420. int ret;
  421. if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
  422. DRM_DEV_ERROR(mhdp->dev, "invalid number of lanes: %d\n",
  423. nlanes);
  424. ret = -EINVAL;
  425. goto err_adjust_lt;
  426. }
  427. payload[0] = nlanes;
  428. put_unaligned_be16(udelay, payload + 1);
  429. memcpy(payload + 3, lanes_data, nlanes);
  430. mutex_lock(&mhdp->mbox_mutex);
  431. ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
  432. DPTX_ADJUST_LT,
  433. sizeof(payload), payload);
  434. if (ret)
  435. goto err_adjust_lt;
  436. /* Yes, read the DPCD read command response */
  437. ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_DP_TX,
  438. DPTX_READ_DPCD,
  439. sizeof(hdr) + nregs);
  440. if (ret)
  441. goto err_adjust_lt;
  442. ret = cdns_mhdp_mailbox_read_receive(mhdp, hdr, sizeof(hdr));
  443. if (ret)
  444. goto err_adjust_lt;
  445. addr = get_unaligned_be24(hdr + 2);
  446. if (addr != DP_LANE0_1_STATUS)
  447. goto err_adjust_lt;
  448. ret = cdns_mhdp_mailbox_read_receive(mhdp, link_status, nregs);
  449. err_adjust_lt:
  450. mutex_unlock(&mhdp->mbox_mutex);
  451. if (ret)
  452. DRM_DEV_ERROR(mhdp->dev, "Failed to adjust Link Training.\n");
  453. return ret;
  454. }
  455. /* EOF CDNS MHDP Helpers */
  456. #define FW_NAME "cadence/mhdp8546.bin"
  457. #define CDNS_MHDP_IMEM 0x10000
  458. #define CDNS_DP_TRAINING_PATTERN_4 0x7
  459. #define CDNS_KEEP_ALIVE_TIMEOUT 2000
#ifdef CONFIG_DRM_CDNS_MHDP_J721E
/* TI J721E wrapper hooks, invoked around the generic bridge operations. */
static const struct mhdp_platform_ops mhdp_ti_j721e_ops = {
	.init = cdns_mhdp_j721e_init,
	.exit = cdns_mhdp_j721e_fini,
	.enable = cdns_mhdp_j721e_enable,
	.disable = cdns_mhdp_j721e_disable,
};
#endif

/* DT match table; .data carries optional platform-specific ops. */
static const struct of_device_id mhdp_ids[] = {
	{ .compatible = "cdns,mhdp8546", },
#ifdef CONFIG_DRM_CDNS_MHDP_J721E
	{ .compatible = "ti,j721e-mhdp8546", .data = &mhdp_ti_j721e_ops },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mhdp_ids);
  476. #define CDNS_LANE_1 BIT(0)
  477. #define CDNS_LANE_2 BIT(1)
  478. #define CDNS_LANE_4 BIT(2)
  479. #define CDNS_VOLT_SWING(x) ((x) & GENMASK(1, 0))
  480. #define CDNS_FORCE_VOLT_SWING BIT(2)
  481. #define CDNS_PRE_EMPHASIS(x) ((x) & GENMASK(1, 0))
  482. #define CDNS_FORCE_PRE_EMPHASIS BIT(2)
  483. #define CDNS_SUPPORT_TPS(x) BIT((x) - 1)
  484. #define CDNS_FAST_LINK_TRAINING BIT(0)
  485. #define CDNS_LANE_MAPPING_TYPE_C_LANE_0(x) ((x) & GENMASK(1, 0))
  486. #define CDNS_LANE_MAPPING_TYPE_C_LANE_1(x) ((x) & GENMASK(3, 2))
  487. #define CDNS_LANE_MAPPING_TYPE_C_LANE_2(x) ((x) & GENMASK(5, 4))
  488. #define CDNS_LANE_MAPPING_TYPE_C_LANE_3(x) ((x) & GENMASK(7, 6))
  489. #define CDNS_LANE_MAPPING_NORMAL 0xe4
  490. #define CDNS_LANE_MAPPING_FLIPPED 0x1b
  491. #define CDNS_DP_MAX_NUM_LANES 4
  492. #define CDNS_DP_TEST_VSC_SDP (1 << 6) /* 1.3+ */
  493. #define CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY (1 << 7)
  494. static unsigned int max_link_rate(struct cdns_mhdp_host host,
  495. struct cdns_mhdp_sink sink)
  496. {
  497. return min(host.link_rate, sink.link_rate);
  498. }
  499. static u8 eq_training_pattern_supported(struct cdns_mhdp_host host,
  500. struct cdns_mhdp_sink sink)
  501. {
  502. return fls(host.pattern_supp & sink.pattern_supp);
  503. }
/*
 * Load the MHDP firmware image into IMEM, start the uCPU and bring the
 * device to MHDP_HW_READY. Runs from the request_firmware_nowait()
 * callback (see mhdp_fw_cb()). On any failure hw_state is reset to
 * MHDP_HW_INACTIVE and a negative errno is returned.
 */
static int mhdp_fw_activate(const struct firmware *fw,
			    struct cdns_mhdp_device *mhdp)
{
	unsigned int reg;
	int ret = 0;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	if (!fw || !fw->data) {
		dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
		return -EINVAL;
	}

	/* Only start loading from the INACTIVE state; claim it under lock. */
	spin_lock(&mhdp->start_lock);
	if (mhdp->hw_state != MHDP_HW_INACTIVE) {
		spin_unlock(&mhdp->start_lock);
		/* STOPPED just means driver removal raced us: not an error
		 * worth logging. */
		if (mhdp->hw_state != MHDP_HW_STOPPED)
			dev_err(mhdp->dev, "%s: Bad HW state: %d\n",
				__func__, mhdp->hw_state);
		return -EBUSY;
	}
	mhdp->hw_state = MHDP_HW_LOADING;
	spin_unlock(&mhdp->start_lock);

	/* Release uCPU reset and stall it. */
	writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);

	memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);

	/* Leave debug mode, release stall */
	writel(0, mhdp->regs + CDNS_APB_CTRL);

	/*
	 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
	 * Updated each sched "tick" (~2ms)
	 */
	ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
				 reg & CDNS_KEEP_ALIVE_MASK, 500,
				 CDNS_KEEP_ALIVE_TIMEOUT);
	if (ret) {
		dev_err(mhdp->dev,
			"device didn't give any life sign: reg %d\n", reg);
		goto error;
	}

	/* Init events to 0 as it's not cleared by FW at boot but on read */
	readl(mhdp->regs + CDNS_SW_EVENT0);
	readl(mhdp->regs + CDNS_SW_EVENT1);
	readl(mhdp->regs + CDNS_SW_EVENT2);
	readl(mhdp->regs + CDNS_SW_EVENT3);

	/* Activate uCPU */
	ret = cdns_mhdp_set_firmware_active(mhdp, true);
	if (ret) {
		dev_err(mhdp->dev, "%s: Failed to activate FW: %d\n",
			__func__, ret);
		goto error;
	}

	spin_lock(&mhdp->start_lock);
	mhdp->hw_state = MHDP_HW_READY;
	/*
	 * Here we must keep the lock while enabling the interrupts
	 * since it would otherwise be possible that interrupt enable
	 * code is executed after the bridge is detached. The similar
	 * situation is not possible in attach()/detach() callbacks
	 * since the hw_state changes from MHDP_HW_READY to
	 * MHDP_HW_STOPPED happens only due to driver removal when
	 * bridge should already be detached.
	 */
	if (mhdp->bridge_attached) {
		/* enable interrupts */
		writel(0, mhdp->regs + CDNS_APB_INT_MASK);
		writel(0, mhdp->regs + CDNS_MB_INT_MASK);
	}
	spin_unlock(&mhdp->start_lock);

	dev_dbg(mhdp->dev, "DP FW activated\n");

	return 0;

error:
	/* Roll back so a later load attempt can start from scratch. */
	spin_lock(&mhdp->start_lock);
	mhdp->hw_state = MHDP_HW_INACTIVE;
	spin_unlock(&mhdp->start_lock);

	return ret;
}
  578. static void mhdp_fw_cb(const struct firmware *fw, void *context)
  579. {
  580. struct cdns_mhdp_device *mhdp = context;
  581. bool bridge_attached;
  582. int ret;
  583. dev_dbg(mhdp->dev, "firmware callback\n");
  584. ret = mhdp_fw_activate(fw, mhdp);
  585. release_firmware(fw);
  586. if (ret)
  587. return;
  588. /*
  589. * XXX how to make sure the bridge is still attached when
  590. * calling drm_kms_helper_hotplug_event() after releasing
  591. * the lock? We should not hold the spin lock when
  592. * calling drm_kms_helper_hotplug_event() since it may
  593. * cause a dead lock. FB-dev console calls detect from the
  594. * same thread just down the call stack started here.
  595. */
  596. spin_lock(&mhdp->start_lock);
  597. bridge_attached = mhdp->bridge_attached;
  598. spin_unlock(&mhdp->start_lock);
  599. if (bridge_attached)
  600. drm_kms_helper_hotplug_event(mhdp->bridge.dev);
  601. }
  602. static int load_firmware(struct cdns_mhdp_device *mhdp)
  603. {
  604. int ret;
  605. ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
  606. GFP_KERNEL, mhdp, mhdp_fw_cb);
  607. if (ret) {
  608. dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
  609. FW_NAME, ret);
  610. return ret;
  611. }
  612. return 0;
  613. }
/*
 * After an HPD event, verify that an established DP link is still good.
 * If DPCD reads fail or channel EQ / clock recovery is lost, flag the
 * connector's link-status property as BAD so userspace can trigger a
 * modeset and retrain.
 */
static void mhdp_check_link(struct cdns_mhdp_device *mhdp)
{
	struct drm_connector *conn = &mhdp->connector;
	u8 status[DP_LINK_STATUS_SIZE];
	bool hpd_state;
	int hpd_event;
	int ret;

	/* Nothing to check if there is no link */
	if (!mhdp->link_up)
		return;

	hpd_event = cdns_mhdp_read_event(mhdp);

	/* Getting event bits failed, bail out */
	if (hpd_event < 0) {
		dev_warn(mhdp->dev, "%s: read event failed: %d\n",
			 __func__, hpd_event);
		return;
	}

	hpd_state = !!(hpd_event & DPTX_READ_EVENT_HPD_STATE);

	/* No point checking the link if HPD is down (cable is unplugged) */
	if (!hpd_state)
		return;

	/*
	 * Prevent display reconfiguration between link check and link
	 * status property setting. We must use the legacy giant-lock
	 * since drm_connector_set_link_status_property()'s fine
	 * grained DRM locking implementation is broken.
	 */
	mutex_lock(&conn->dev->mode_config.mutex);

	/* Check if the link is still up */
	ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);

	if (ret < 0 || /* If dpcd read fails, assume the link is down too */
	    !drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) ||
	    !drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
		/* Link is broken, indicate it with the link status property */
		drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);

	mutex_unlock(&conn->dev->mode_config.mutex);
}
  651. static irqreturn_t mhdp_irq_handler(int irq, void *data)
  652. {
  653. struct cdns_mhdp_device *mhdp = (struct cdns_mhdp_device *)data;
  654. u32 mbox_stat, apb_stat, sw_ev0, sw_ev1, sw_ev2, sw_ev3;
  655. bool bridge_attached;
  656. apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
  657. mbox_stat = readl(mhdp->regs + CDNS_MB_INT_STATUS);
  658. sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
  659. sw_ev1 = readl(mhdp->regs + CDNS_SW_EVENT1);
  660. sw_ev2 = readl(mhdp->regs + CDNS_SW_EVENT2);
  661. sw_ev3 = readl(mhdp->regs + CDNS_SW_EVENT3);
  662. //dev_dbg(mhdp->dev, "MHDP IRQ apb %x, mbox %x, sw_ev %x/%x/%x/%x\n", apb_stat, mbox_stat, sw_ev0, sw_ev1, sw_ev2, sw_ev3);
  663. /*
  664. * Calling drm_kms_helper_hotplug_event() when not attached
  665. * to drm device causes an oops because the drm_bridge->dev
  666. * is NULL. See mhdp_fw_cb() comments for details about the
  667. * problems related drm_kms_helper_hotplug_event() call.
  668. */
  669. spin_lock(&mhdp->start_lock);
  670. bridge_attached = mhdp->bridge_attached;
  671. spin_unlock(&mhdp->start_lock);
  672. if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
  673. mhdp_check_link(mhdp);
  674. drm_kms_helper_hotplug_event(mhdp->bridge.dev);
  675. }
  676. return IRQ_HANDLED;
  677. }
  678. static ssize_t mhdp_transfer(struct drm_dp_aux *aux,
  679. struct drm_dp_aux_msg *msg)
  680. {
  681. struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
  682. int ret;
  683. if (msg->request != DP_AUX_NATIVE_WRITE &&
  684. msg->request != DP_AUX_NATIVE_READ)
  685. return -ENOTSUPP;
  686. if (msg->request == DP_AUX_NATIVE_WRITE) {
  687. const u8 *buf = msg->buffer;
  688. int i;
  689. for (i = 0; i < msg->size; ++i) {
  690. ret = cdns_mhdp_dpcd_write(mhdp,
  691. msg->address + i, buf[i]);
  692. if (!ret)
  693. continue;
  694. DRM_DEV_ERROR(mhdp->dev, "Failed to write DPCD\n");
  695. return ret;
  696. }
  697. } else {
  698. ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
  699. msg->buffer, msg->size);
  700. if (ret) {
  701. DRM_DEV_ERROR(mhdp->dev, "Failed to read DPCD\n");
  702. return ret;
  703. }
  704. }
  705. return msg->size;
  706. }
  707. static int cdns_mhdp_get_modes(struct drm_connector *connector)
  708. {
  709. struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
  710. struct edid *edid;
  711. int num_modes;
  712. edid = drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
  713. if (!edid) {
  714. DRM_DEV_ERROR(mhdp->dev, "Failed to read EDID\n");
  715. return 0;
  716. }
  717. drm_connector_update_edid_property(connector, edid);
  718. num_modes = drm_add_edid_modes(connector, edid);
  719. kfree(edid);
  720. /*
  721. * HACK: Warn about unsupported display formats until we deal
  722. * with them correctly.
  723. */
  724. if (connector->display_info.color_formats &&
  725. !(connector->display_info.color_formats &
  726. mhdp->display_fmt.color_format))
  727. dev_warn(mhdp->dev,
  728. "%s: No supported color_format found (0x%08x)\n",
  729. __func__, connector->display_info.color_formats);
  730. if (connector->display_info.bpc &&
  731. connector->display_info.bpc < mhdp->display_fmt.bpc)
  732. dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
  733. __func__, connector->display_info.bpc,
  734. mhdp->display_fmt.bpc);
  735. return num_modes;
  736. }
  737. static int cdns_mhdp_detect(struct drm_connector *conn,
  738. struct drm_modeset_acquire_ctx *ctx,
  739. bool force)
  740. {
  741. struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
  742. bool hw_ready;
  743. int ret;
  744. dev_dbg(mhdp->dev, "%s\n", __func__);
  745. spin_lock(&mhdp->start_lock);
  746. hw_ready = mhdp->hw_state == MHDP_HW_READY;
  747. spin_unlock(&mhdp->start_lock);
  748. if (!hw_ready || WARN_ON(!mhdp->bridge_attached))
  749. return connector_status_disconnected;
  750. ret = cdns_mhdp_get_hpd_status(mhdp);
  751. if (ret > 0) {
  752. mhdp->plugged = true;
  753. return connector_status_connected;
  754. }
  755. if (ret < 0)
  756. dev_err(mhdp->dev, "Failed to obtain HPD state\n");
  757. mhdp->plugged = false;
  758. return connector_status_disconnected;
  759. }
  760. static
  761. bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
  762. const struct drm_display_mode *mode,
  763. int lanes, int rate)
  764. {
  765. u32 max_bw, req_bw, bpp;
  766. bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
  767. req_bw = mode->clock * bpp / 8;
  768. max_bw = lanes * rate;
  769. if (req_bw > max_bw) {
  770. dev_dbg(mhdp->dev, "%s: %s (%u * %u/8 =) %u > %u (= %u * %u)\n",
  771. __func__, mode->name, mode->clock, bpp, req_bw,
  772. max_bw, lanes, rate);
  773. return false;
  774. }
  775. return true;
  776. }
  777. static
  778. enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
  779. struct drm_display_mode *mode)
  780. {
  781. struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
  782. if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->host.lanes_cnt,
  783. mhdp->host.link_rate))
  784. return MODE_CLOCK_HIGH;
  785. return MODE_OK;
  786. }
/* Connector helper ops: HPD detect, EDID modes, bandwidth validation. */
static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
.detect_ctx = cdns_mhdp_detect,
.get_modes = cdns_mhdp_get_modes,
.mode_valid = cdns_mhdp_mode_valid,
};
/* Connector ops: stock atomic helpers; probing is single-connector. */
static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.reset = drm_atomic_helper_connector_reset,
.destroy = drm_connector_cleanup,
};
  799. static int cdns_mhdp_attach(struct drm_bridge *bridge)
  800. {
  801. struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
  802. u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
  803. struct drm_connector *conn = &mhdp->connector;
  804. bool hw_ready;
  805. int ret;
  806. dev_dbg(mhdp->dev, "%s\n", __func__);
  807. if (&mhdp->bridge != bridge)
  808. return -ENODEV;
  809. conn->polled = DRM_CONNECTOR_POLL_HPD;
  810. ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
  811. DRM_MODE_CONNECTOR_DisplayPort);
  812. if (ret) {
  813. dev_err(mhdp->dev, "failed to init connector\n");
  814. return ret;
  815. }
  816. drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);
  817. ret = drm_display_info_set_bus_formats(&conn->display_info,
  818. &bus_format, 1);
  819. if (ret)
  820. return ret;
  821. conn->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH;
  822. /*
  823. * HACK: DP is internal to J7 SoC and we need to use DRIVE_POSEDGE
  824. * in the display controller. This is achieved for the time being
  825. * by defining SAMPLE_NEGEDGE here.
  826. */
  827. conn->display_info.bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
  828. DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
  829. ret = drm_connector_attach_encoder(conn, bridge->encoder);
  830. if (ret) {
  831. dev_err(mhdp->dev, "failed to attach connector to encoder\n");
  832. return ret;
  833. }
  834. spin_lock(&mhdp->start_lock);
  835. mhdp->bridge_attached = true;
  836. hw_ready = mhdp->hw_state == MHDP_HW_READY;
  837. spin_unlock(&mhdp->start_lock);
  838. if (hw_ready) {
  839. /* enable interrupts */
  840. writel(0, mhdp->regs + CDNS_APB_INT_MASK);
  841. writel(0, mhdp->regs + CDNS_MB_INT_MASK);
  842. }
  843. //writel(~CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
  844. return 0;
  845. }
/*
 * Prepare sink, controller and PHY for link training and kick off the
 * clock-recovery phase: clear any active training pattern on the sink,
 * reset the PHY configuration, program lane enable / enhanced framing,
 * push the negotiated rate and lane count to the PHY, then enable TPS1
 * on both the PHY and the sink.
 */
static void mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
{
u32 reg32;
u8 i;
union phy_configure_opts phy_cfg;

/* Make sure the sink is not transmitting a stale training pattern. */
drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);

/* Reset PHY configuration */
reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
if (!mhdp->host.scrambler)
reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;

cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

/* Enhanced framing only if both host and sink advertise support. */
cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
mhdp->sink.enhanced & mhdp->host.enhanced);

cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));

/* Write the negotiated rate/lane count to the sink's DPCD. */
drm_dp_link_configure(&mhdp->aux, &mhdp->link);

/* Scale the link rate to the units used by the generic PHY API. */
phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
phy_cfg.dp.lanes = (mhdp->link.num_lanes);

/* Start from zero swing / zero pre-emphasis on every lane. */
for (i = 0; i < 4; i++) {
phy_cfg.dp.voltage[i] = 0;
phy_cfg.dp.pre[i] = 0;
}
phy_cfg.dp.ssc = false;
phy_cfg.dp.set_lanes = true;
phy_cfg.dp.set_rate = true;
phy_cfg.dp.set_voltages = true;
phy_configure(mhdp->phy, &phy_cfg);

/* Enable TPS1 transmission in the PHY (scrambling off for training). */
cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
CDNS_PHY_COMMON_CONFIG |
CDNS_PHY_TRAINING_EN |
CDNS_PHY_TRAINING_TYPE(1) |
CDNS_PHY_SCRAMBLER_BYPASS);

/* Tell the sink to expect TPS1 with scrambling disabled. */
drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
}
/*
 * Translate the sink's adjust requests (from @link_status) into
 * per-lane TRAINING_LANEx_SET bytes (@lanes_data) and the matching PHY
 * settings (@phy_cfg), clamping each value to the host's limits and to
 * the DP rule that swing level + pre-emphasis level must not exceed 3.
 */
static void mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
u8 link_status[DP_LINK_STATUS_SIZE],
u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
union phy_configure_opts *phy_cfg)
{
unsigned int i;
u8 adjust, max_pre_emphasis, max_volt_swing;
u8 set_volt, set_pre;

/* Host maximums; pre-emphasis limit kept in its DPCD bit position. */
max_pre_emphasis = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
<< DP_TRAIN_PRE_EMPHASIS_SHIFT;
max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);

for (i = 0; i < mhdp->link.num_lanes; i++) {
/* Check if Voltage swing and pre-emphasis are within limits */
adjust = drm_dp_get_adjust_request_voltage(link_status, i);
set_volt = min_t(u8, adjust, max_volt_swing);
adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
set_pre = min_t(u8, adjust, max_pre_emphasis) >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
/* Voltage swing level and pre-emphasis level combination is not allowed:
 * leaving pre-emphasis as-is, and adjusting voltage swing.
 */
if (set_volt + set_pre > 3)
set_volt = 3 - set_pre;
phy_cfg->dp.voltage[i] = set_volt;
lanes_data[i] = set_volt;
/* Flag MAX_SWING_REACHED so the sink stops asking for more. */
if (set_volt == max_volt_swing)
lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;
phy_cfg->dp.pre[i] = set_pre;
lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* Likewise flag the pre-emphasis ceiling. */
if (set_pre == (max_pre_emphasis >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
}
}
  914. static void mhdp_set_adjust_request_voltage(
  915. u8 link_status[DP_LINK_STATUS_SIZE], int lane, u8 volt)
  916. {
  917. int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
  918. int s = ((lane & 1) ?
  919. DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
  920. DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
  921. int idx = i - DP_LANE0_1_STATUS;
  922. link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
  923. link_status[idx] |= volt << s;
  924. }
  925. static void mhdp_set_adjust_request_pre_emphasis(
  926. u8 link_status[DP_LINK_STATUS_SIZE], int lane, u8 pre_emphasis)
  927. {
  928. int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
  929. int s = ((lane & 1) ?
  930. DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
  931. DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
  932. int idx = i - DP_LANE0_1_STATUS;
  933. link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
  934. link_status[idx] |= pre_emphasis << s;
  935. }
  936. static void mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
  937. u8 link_status[DP_LINK_STATUS_SIZE])
  938. {
  939. unsigned int i;
  940. u8 volt, pre, max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing),
  941. max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
  942. for (i = 0; i < mhdp->link.num_lanes; i++) {
  943. volt = drm_dp_get_adjust_request_voltage(link_status, i);
  944. pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
  945. if (volt + pre > 3)
  946. mhdp_set_adjust_request_voltage(link_status, i,
  947. 3 - pre);
  948. if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
  949. mhdp_set_adjust_request_voltage(link_status, i,
  950. max_volt);
  951. if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
  952. mhdp_set_adjust_request_pre_emphasis(link_status, i,
  953. max_pre);
  954. }
  955. }
/*
 * Channel-equalization phase of link training using training pattern
 * @eq_tps (2, 3 or 4). Iterates up to five times: apply the sink's
 * adjust requests to the PHY, run the firmware's adjust-LT step with
 * @training_interval (us), and check CR/EQ status.
 * Returns true when equalization succeeds, false otherwise.
 */
static bool mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
u8 eq_tps,
unsigned int training_interval)
{
u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
u32 reg32;
union phy_configure_opts phy_cfg;

dev_dbg(mhdp->dev, "Starting EQ phase\n");

/* Enable link training TPS[eq_tps] in PHY */
reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
CDNS_PHY_TRAINING_TYPE(eq_tps);
/* TPS4 runs with scrambling enabled; TPS2/3 bypass the scrambler. */
if (eq_tps != 4)
reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

/* Select the matching pattern on the sink. */
drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
(eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
CDNS_DP_TRAINING_PATTERN_4);

drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

do {
/* Convert the sink's requests into lane settings + PHY config. */
mhdp_get_adjust_train(mhdp, link_status, lanes_data, &phy_cfg);
phy_cfg.dp.lanes = (mhdp->link.num_lanes);
phy_cfg.dp.ssc = false;
phy_cfg.dp.set_lanes = false;
phy_cfg.dp.set_rate = false;
phy_cfg.dp.set_voltages = true;
phy_configure(mhdp->phy, &phy_cfg);

/* Firmware writes the lane settings and re-reads link status. */
cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
training_interval, lanes_data, link_status);

/* Losing clock recovery aborts the EQ phase immediately. */
if (!drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes))
goto err;

if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
dev_dbg(mhdp->dev, "EQ phase succeeded\n");
return true;
}

fail_counter_short++;

mhdp_adjust_requested_eq(mhdp, link_status);
} while (fail_counter_short < 5);

err:
dev_dbg(mhdp->dev, "EQ phase failed for %d lanes and %d rate\n",
mhdp->link.num_lanes, mhdp->link.rate);

return false;
}
  999. static void mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
  1000. u8 link_status[DP_LINK_STATUS_SIZE],
  1001. u8 *req_volt, u8 *req_pre)
  1002. {
  1003. const u32 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing),
  1004. max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
  1005. unsigned int i;
  1006. for (i = 0; i < mhdp->link.num_lanes; i++) {
  1007. unsigned int val;
  1008. val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
  1009. max_volt : req_volt[i];
  1010. mhdp_set_adjust_request_voltage(link_status, i, val);
  1011. val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
  1012. max_pre : req_pre[i];
  1013. mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
  1014. }
  1015. }
/*
 * Evaluate one clock-recovery iteration.
 *
 * @before_cr holds the TRAINING_LANEx_SET bytes that were transmitted,
 * @after_cr the link status read back afterwards. Outputs:
 *  - *cr_done: all lanes achieved clock recovery,
 *  - req_volt[]/req_pre[]: the sink's new requests clamped to host
 *    limits,
 *  - *same_before_adjust: some lane requested exactly what was already
 *    set (used to detect a stuck sink),
 *  - *max_swing_reached: per DP v1.4 sec. 3.1.5.2 / Table 3-1, CR has
 *    failed at maximum drive and training must give up at this rate.
 */
static void mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
bool *same_before_adjust, bool *max_swing_reached,
u8 before_cr[DP_LINK_STATUS_SIZE],
u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
u8 *req_pre)
{
const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing),
max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
bool same_pre, same_volt;
unsigned int i;

*same_before_adjust = false;
*max_swing_reached = false;
*cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);

for (i = 0; i < mhdp->link.num_lanes; i++) {
u8 tmp;

/* Clamp the sink's new requests to what the host can drive. */
tmp = drm_dp_get_adjust_request_voltage(after_cr, i);
req_volt[i] = min_t(u8, tmp, max_volt);

tmp = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT;
req_pre[i] = min_t(u8, tmp, max_pre);

/* Compare against the lane-set bytes we just transmitted. */
same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
req_volt[i];
if (same_pre && same_volt)
*same_before_adjust = true;

/* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
*max_swing_reached = true;
return;
}
}
}
/*
 * Clock-recovery phase of link training. Repeats the adjust/measure
 * cycle until all lanes lock, bailing out after 5 iterations with
 * unchanged settings (stuck sink), 10 iterations total, or when the
 * maximum drive level is reached without lock.
 * Returns true on success, false on failure.
 */
static bool mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
{
u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
fail_counter_short = 0, fail_counter_cr_long = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
bool cr_done;
union phy_configure_opts phy_cfg;

dev_dbg(mhdp->dev, "Starting CR phase\n");

/* Program TPS1 on sink + PHY and push base link parameters. */
mhdp_link_training_init(mhdp);

drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

do {
u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
bool same_before_adjust, max_swing_reached;

/* Convert the sink's requests into lane settings + PHY config. */
mhdp_get_adjust_train(mhdp, link_status, lanes_data, &phy_cfg);
phy_cfg.dp.lanes = (mhdp->link.num_lanes);
phy_cfg.dp.ssc = false;
phy_cfg.dp.set_lanes = false;
phy_cfg.dp.set_rate = false;
phy_cfg.dp.set_voltages = true;
phy_configure(mhdp->phy, &phy_cfg);

/* CR phase uses the fixed 100 us AUX read interval. */
cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
lanes_data, link_status);

mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
&max_swing_reached, lanes_data, link_status,
requested_adjust_volt_swing,
requested_adjust_pre_emphasis);

if (max_swing_reached) {
dev_err(mhdp->dev, "CR: max swing reached\n");
goto err;
}

if (cr_done) {
dev_dbg(mhdp->dev, "CR phase succeeded\n");
return true;
}

/* Not all CR_DONE bits set */
fail_counter_cr_long++;

if (same_before_adjust) {
fail_counter_short++;
continue;
}

/* Settings changed, so restart the short-failure counter. */
fail_counter_short = 0;
/*
 * Voltage swing/pre-emphasis adjust requested
 * during CR phase
 */
mhdp_adjust_requested_cr(mhdp, link_status,
requested_adjust_volt_swing,
requested_adjust_pre_emphasis);
} while (fail_counter_short < 5 && fail_counter_cr_long < 10);

err:
dev_dbg(mhdp->dev, "CR phase failed for %d lanes and %d rate\n",
mhdp->link.num_lanes, mhdp->link.rate);

return false;
}
  1104. static void lower_link_rate(struct drm_dp_link *link)
  1105. {
  1106. switch (drm_dp_link_rate_to_bw_code(link->rate)) {
  1107. case DP_LINK_BW_2_7:
  1108. link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
  1109. break;
  1110. case DP_LINK_BW_5_4:
  1111. link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
  1112. break;
  1113. case DP_LINK_BW_8_1:
  1114. link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
  1115. break;
  1116. }
  1117. }
  1118. static int mhdp_link_training(struct cdns_mhdp_device *mhdp,
  1119. unsigned int training_interval)
  1120. {
  1121. u32 reg32;
  1122. union phy_configure_opts phy_cfg;
  1123. const u8 eq_tps = eq_training_pattern_supported(mhdp->host, mhdp->sink);
  1124. while (1) {
  1125. if (!mhdp_link_training_cr(mhdp)) {
  1126. if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
  1127. DP_LINK_BW_1_62) {
  1128. dev_dbg(mhdp->dev,
  1129. "Reducing link rate during CR phase\n");
  1130. lower_link_rate(&mhdp->link);
  1131. drm_dp_link_configure(&mhdp->aux, &mhdp->link);
  1132. phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
  1133. phy_cfg.dp.lanes = (mhdp->link.num_lanes);
  1134. phy_cfg.dp.ssc = false;
  1135. phy_cfg.dp.set_lanes = false;
  1136. phy_cfg.dp.set_rate = true;
  1137. phy_cfg.dp.set_voltages = false;
  1138. phy_configure(mhdp->phy, &phy_cfg);
  1139. continue;
  1140. } else if (mhdp->link.num_lanes > 1) {
  1141. dev_dbg(mhdp->dev,
  1142. "Reducing lanes number during CR phase\n");
  1143. mhdp->link.num_lanes >>= 1;
  1144. mhdp->link.rate = max_link_rate(mhdp->host,
  1145. mhdp->sink);
  1146. drm_dp_link_configure(&mhdp->aux, &mhdp->link);
  1147. phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
  1148. phy_cfg.dp.lanes = (mhdp->link.num_lanes);
  1149. phy_cfg.dp.ssc = false;
  1150. phy_cfg.dp.set_lanes = true;
  1151. phy_cfg.dp.set_rate = false;
  1152. phy_cfg.dp.set_voltages = false;
  1153. phy_configure(mhdp->phy, &phy_cfg);
  1154. continue;
  1155. }
  1156. dev_dbg(mhdp->dev,
  1157. "Link training failed during CR phase\n");
  1158. goto err;
  1159. }
  1160. if (mhdp_link_training_channel_eq(mhdp, eq_tps,
  1161. training_interval))
  1162. break;
  1163. if (mhdp->link.num_lanes > 1) {
  1164. dev_dbg(mhdp->dev,
  1165. "Reducing lanes number during EQ phase\n");
  1166. mhdp->link.num_lanes >>= 1;
  1167. drm_dp_link_configure(&mhdp->aux, &mhdp->link);
  1168. phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
  1169. phy_cfg.dp.lanes = (mhdp->link.num_lanes);
  1170. phy_cfg.dp.ssc = false;
  1171. phy_cfg.dp.set_lanes = true;
  1172. phy_cfg.dp.set_rate = false;
  1173. phy_cfg.dp.set_voltages = false;
  1174. phy_configure(mhdp->phy, &phy_cfg);
  1175. continue;
  1176. } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
  1177. DP_LINK_BW_1_62) {
  1178. dev_dbg(mhdp->dev,
  1179. "Reducing link rate during EQ phase\n");
  1180. lower_link_rate(&mhdp->link);
  1181. drm_dp_link_configure(&mhdp->aux, &mhdp->link);
  1182. phy_cfg.dp.link_rate = (mhdp->link.rate / 100);
  1183. phy_cfg.dp.lanes = (mhdp->link.num_lanes);
  1184. phy_cfg.dp.ssc = false;
  1185. phy_cfg.dp.set_lanes = false;
  1186. phy_cfg.dp.set_rate = true;
  1187. phy_cfg.dp.set_voltages = false;
  1188. phy_configure(mhdp->phy, &phy_cfg);
  1189. continue;
  1190. }
  1191. dev_dbg(mhdp->dev, "Link training failed during EQ phase\n");
  1192. goto err;
  1193. }
  1194. dev_dbg(mhdp->dev, "Link training successful\n");
  1195. drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
  1196. mhdp->host.scrambler ? 0 :
  1197. DP_LINK_SCRAMBLING_DISABLE);
  1198. cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
  1199. reg32 &= ~GENMASK(1, 0);
  1200. reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
  1201. cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
  1202. /* Reset PHY config */
  1203. reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
  1204. if (!mhdp->host.scrambler)
  1205. reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
  1206. cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
  1207. return 0;
  1208. err:
  1209. /* Reset PHY config */
  1210. reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
  1211. if (!mhdp->host.scrambler)
  1212. reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
  1213. cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
  1214. drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
  1215. DP_TRAINING_PATTERN_DISABLE);
  1216. return -EIO;
  1217. }
/*
 * Bridge .disable: stop the video stream. Disables the framer, marks
 * the link down, powers down the sink (only while it is still plugged,
 * so AUX traffic is not attempted on an absent sink), gates the stream
 * 0 VIF clock and finally calls the platform-specific disable hook.
 */
static void cdns_mhdp_disable(struct drm_bridge *bridge)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
u32 resp;

dev_dbg(mhdp->dev, "%s\n", __func__);

/* Stop the framer and flag "no video" to the sink. */
cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
resp &= ~CDNS_DP_FRAMER_EN;
resp |= CDNS_DP_NO_VIDEO_MODE;
cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);

mhdp->link_up = false;

if (mhdp->plugged)
drm_dp_link_power_down(&mhdp->aux, &mhdp->link);

/* Disable VIF clock for stream 0 */
cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));

if (mhdp->ops && mhdp->ops->disable)
mhdp->ops->disable(mhdp);
}
  1237. static u32 get_training_interval_us(struct cdns_mhdp_device *mhdp,
  1238. u32 interval)
  1239. {
  1240. if (interval == 0)
  1241. return 400;
  1242. if (interval < 5)
  1243. return 4000 << (interval - 1);
  1244. dev_err(mhdp->dev,
  1245. "wrong training interval returned by DPCD: %d\n", interval);
  1246. return 0;
  1247. }
/*
 * Bring the DP link up: probe and power up the sink, read its receiver
 * capabilities, negotiate rate/lane count against the host limits,
 * configure downspread and 8b/10b coding, then run link training.
 * Returns 0 on success or a negative error code.
 */
static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
{
u32 resp;
u8 reg0[DP_RECEIVER_CAP_SIZE], amp[2];

drm_dp_link_probe(&mhdp->aux, &mhdp->link);

dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
drm_dp_link_power_up(&mhdp->aux, &mhdp->link);
/* FIXME (CDNS): do we have to wait for 100ms before going on? */
mdelay(100);

/* Cache the sink capabilities reported by the link probe. */
mhdp->sink.link_rate = mhdp->link.rate;
mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
mhdp->sink.enhanced = !!(mhdp->link.capabilities &
DP_LINK_CAP_ENHANCED_FRAMING);

/* Read the full receiver capability block for TPS/fast-link bits. */
drm_dp_dpcd_read(&mhdp->aux, DP_DPCD_REV, reg0, DP_RECEIVER_CAP_SIZE);

/* TPS1 and TPS2 are mandatory; TPS3/TPS4 depend on the sink. */
mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
if (drm_dp_tps3_supported(reg0))
mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
if (drm_dp_tps4_supported(reg0))
mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);

mhdp->sink.fast_link = !!(reg0[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING);

/* Negotiate: best common rate, lane count clamped to host limit. */
mhdp->link.rate = max_link_rate(mhdp->host, mhdp->sink);
mhdp->link.num_lanes = min_t(u8, mhdp->sink.lanes_cnt,
mhdp->host.lanes_cnt & GENMASK(2, 0));

/* Disable framer for link training */
cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
resp &= ~CDNS_DP_FRAMER_EN;
cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);

/* Spread AMP if required, enable 8b/10b coding */
amp[0] = mhdp->host.ssc ? DP_SPREAD_AMP_0_5 : 0;
amp[1] = DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);

/*
 * NOTE(review): bitwise '&' of the two fast_link flags -- sink side is
 * normalized to 0/1 above; confirm host.fast_link is also 0/1 before
 * converting this to a logical '&&'.
 */
if (mhdp->host.fast_link & mhdp->sink.fast_link) {
/* FIXME: implement fastlink */
dev_err(mhdp->dev, "fastlink not supported\n");
return -ENOTSUPP;
} else {
const u32 interval = reg0[DP_TRAINING_AUX_RD_INTERVAL] &
DP_TRAINING_AUX_RD_MASK;
const u32 interval_us = get_training_interval_us(mhdp,
interval);
if (!interval_us ||
mhdp_link_training(mhdp, interval_us)) {
dev_err(mhdp->dev, "Link training failed. Exiting.\n");
return -EIO;
}
}

mhdp->link_up = true;

return 0;
}
  1298. u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
  1299. {
  1300. u32 bpp;
  1301. if (fmt->y_only)
  1302. return fmt->bpc;
  1303. switch (fmt->color_format) {
  1304. case DRM_COLOR_FORMAT_RGB444:
  1305. case DRM_COLOR_FORMAT_YCRCB444:
  1306. bpp = fmt->bpc * 3;
  1307. break;
  1308. case DRM_COLOR_FORMAT_YCRCB422:
  1309. bpp = fmt->bpc * 2;
  1310. break;
  1311. case DRM_COLOR_FORMAT_YCRCB420:
  1312. bpp = fmt->bpc * 3 / 2;
  1313. break;
  1314. default:
  1315. bpp = fmt->bpc * 3;
  1316. WARN_ON(1);
  1317. }
  1318. return bpp;
  1319. }
/*
 * Enable single-stream transport for the current CRTC mode: verify the
 * mode fits the trained link, search for a transfer-unit (TU) size with
 * a usable valid-symbol count, then program the framer TU, line
 * thresholds and finally the full video timing configuration.
 * Returns 0 on success, -EINVAL when the mode cannot be carried.
 */
static int cdns_mhdp_sst_enable(struct drm_bridge *bridge)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
u32 rate, vs, vs_f, required_bandwidth, available_bandwidth;
u32 tu_size = 30, line_thresh1, line_thresh2, line_thresh = 0;
struct drm_display_mode *mode;
int pxlclock;
u32 bpp, bpc, pxlfmt;

pxlfmt = mhdp->display_fmt.color_format;
bpc = mhdp->display_fmt.bpc;

mode = &bridge->encoder->crtc->state->mode;
pxlclock = mode->crtc_clock;

mhdp->stream_id = 0;

/* Link rate in Mb/s for the bandwidth math below. */
rate = mhdp->link.rate / 1000;

bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

/* Re-check against the *trained* lane count and rate. */
if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
mhdp->link.rate)) {
dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
__func__, mode->name, mhdp->link.num_lanes,
mhdp->link.rate / 100);
return -EINVAL;
}

/* find optimal tu_size */
required_bandwidth = pxlclock * bpp / 8;
available_bandwidth = mhdp->link.num_lanes * rate;
/*
 * Grow the TU from 32 in steps of 2, computing the valid-symbol
 * count (vs, with vs_f as its fractional part in thousandths),
 * until the fraction falls in an acceptable band and at least two
 * stuffing symbols remain, or the 64-symbol TU maximum is reached.
 */
do {
tu_size += 2;

vs_f = tu_size * required_bandwidth / available_bandwidth;
vs = vs_f / 1000;
vs_f = vs_f % 1000;
/*
 * FIXME (CDNS): downspreading?
 * It's unused is what I've been told.
 */
} while ((vs == 1 || ((vs_f > 850 || vs_f < 100) && vs_f != 0) ||
tu_size - vs < 2) && tu_size < 64);

if (vs > 64) {
dev_err(mhdp->dev,
"%s: No space for framing %s (%u lanes at %u Mbps)\n",
__func__, mode->name, mhdp->link.num_lanes,
mhdp->link.rate / 100);
return -EINVAL;
}

cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
CDNS_DP_FRAMER_TU_VS(vs) |
CDNS_DP_FRAMER_TU_SIZE(tu_size) |
CDNS_DP_FRAMER_TU_CNT_RST_EN);

/* Line threshold in symbols (<<5 gives 1/32 fixed point). */
line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
line_thresh = line_thresh1 - line_thresh2 / mhdp->link.num_lanes;
line_thresh = (line_thresh >> 5) + 2;
cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
line_thresh & GENMASK(5, 0));

/* TU/valid-symbol difference for stream config, clamped to 0..3. */
cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
0 : tu_size - vs));

cdns_mhdp_configure_video(bridge);

return 0;
}
/*
 * Program the full video path for the current CRTC mode: pixel format
 * and bit depth, sync polarities, horizontal/vertical timings, the DP
 * Main Stream Attributes (MSA) and the MISC0/MISC1 bytes, then enable
 * the framer. All per-stream registers use mhdp->stream_id.
 */
void cdns_mhdp_configure_video(struct drm_bridge *bridge)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
unsigned int dp_framer_sp = 0, msa_horizontal_1,
msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
misc0 = 0, misc1 = 0, pxl_repr,
front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
dp_vertical_1;
struct drm_display_mode *mode;
u32 bpp, bpc, pxlfmt;
u32 tmp;
u8 stream_id = mhdp->stream_id;

mode = &bridge->encoder->crtc->state->mode;

pxlfmt = mhdp->display_fmt.color_format;
bpc = mhdp->display_fmt.bpc;

/* if YCBCR supported and stream not SD, use ITU709 */
/* FIXME: handle ITU version with YCBCR420 when supported */
if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 ||
pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && mode->crtc_vdisplay >= 720)
misc0 = DP_YCBCR_COEFFICIENTS_ITU709;

bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

/* Select the framer pixel representation and MSA MISC0 color bits. */
switch (pxlfmt) {
case DRM_COLOR_FORMAT_RGB444:
pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
misc0 |= DP_COLOR_FORMAT_RGB;
break;
case DRM_COLOR_FORMAT_YCRCB444:
pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
break;
case DRM_COLOR_FORMAT_YCRCB422:
pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
break;
case DRM_COLOR_FORMAT_YCRCB420:
/* Y420 is signalled via VSC SDP, not MISC0 (see below). */
pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
break;
default:
pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
}

/* Encode the bit depth into both MISC0 and the framer register. */
switch (bpc) {
case 6:
misc0 |= DP_TEST_BIT_DEPTH_6;
pxl_repr |= CDNS_DP_FRAMER_6_BPC;
break;
case 8:
misc0 |= DP_TEST_BIT_DEPTH_8;
pxl_repr |= CDNS_DP_FRAMER_8_BPC;
break;
case 10:
misc0 |= DP_TEST_BIT_DEPTH_10;
pxl_repr |= CDNS_DP_FRAMER_10_BPC;
break;
case 12:
misc0 |= DP_TEST_BIT_DEPTH_12;
pxl_repr |= CDNS_DP_FRAMER_12_BPC;
break;
case 16:
misc0 |= DP_TEST_BIT_DEPTH_16;
pxl_repr |= CDNS_DP_FRAMER_16_BPC;
break;
}

bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;

cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
bnd_hsync2vsync);

/* Sync polarity control for the hsync-to-vsync generator. */
hsync2vsync_pol_ctrl = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
hsync2vsync_pol_ctrl);

cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);

/* Framer stream parameters: interlace and sync polarities. */
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);

front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
CDNS_DP_FRONT_PORCH(front_porch) |
CDNS_DP_BACK_PORCH(back_porch));

/* Active-line payload size in bytes. */
cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
mode->crtc_hdisplay * bpp / 8);

/* MSA horizontal: total width and hsync start (from end of line). */
msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
CDNS_DP_MSAH0_HSYNC_START(msa_h0));

hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
msa_horizontal_1);

/* MSA vertical: total height and vsync start (from end of frame). */
msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
CDNS_DP_MSAV0_VSYNC_START(msa_v0));

vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
msa_vertical_1);

if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
mode->crtc_vtotal % 2 == 0)
misc1 = DP_TEST_INTERLACED;
if (mhdp->display_fmt.y_only)
misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
/* FIXME: use VSC SDP for Y420 */
/* FIXME: (CDNS) no code for Y420 in bare metal test */
if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420)
misc1 = CDNS_DP_TEST_VSC_SDP;

cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
misc0 | (misc1 << 8));

cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
CDNS_DP_H_HSYNC_WIDTH(hsync) |
CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));

cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
CDNS_DP_V0_VSTART(msa_v0));

dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
mode->crtc_vtotal % 2 == 0)
dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;
cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);

/* Mirror the interlace flag into the VB-ID field. */
cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
(mode->flags & DRM_MODE_FLAG_INTERLACE) ?
CDNS_DP_VB_ID_INTERLACED : 0);

/* Everything is programmed: enable the framer and start video. */
cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &tmp);
tmp |= CDNS_DP_FRAMER_EN;
tmp &= ~CDNS_DP_NO_VIDEO_MODE;
cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, tmp);
}
  1520. void cdns_mhdp_enable(struct drm_bridge *bridge)
  1521. {
  1522. struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
  1523. u32 resp;
  1524. dev_dbg(mhdp->dev, "bridge enable\n");
  1525. if (mhdp->ops && mhdp->ops->enable)
  1526. mhdp->ops->enable(mhdp);
  1527. /* Enable VIF clock for stream 0 */
  1528. cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
  1529. cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
  1530. resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
  1531. if (!mhdp->link_up)
  1532. cdns_mhdp_link_up(mhdp);
  1533. cdns_mhdp_sst_enable(bridge);
  1534. }
  1535. static void cdns_mhdp_detach(struct drm_bridge *bridge)
  1536. {
  1537. struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
  1538. dev_dbg(mhdp->dev, "%s\n", __func__);
  1539. spin_lock(&mhdp->start_lock);
  1540. mhdp->bridge_attached = false;
  1541. spin_unlock(&mhdp->start_lock);
  1542. writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
  1543. writel(~0, mhdp->regs + CDNS_MB_INT_MASK);
  1544. }
  1545. static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
  1546. .enable = cdns_mhdp_enable,
  1547. .disable = cdns_mhdp_disable,
  1548. .attach = cdns_mhdp_attach,
  1549. .detach = cdns_mhdp_detach,
  1550. };
  1551. static int mhdp_probe(struct platform_device *pdev)
  1552. {
  1553. const struct of_device_id *match;
  1554. struct resource *regs;
  1555. struct cdns_mhdp_device *mhdp;
  1556. struct clk *clk;
  1557. int ret;
  1558. unsigned long rate;
  1559. int irq;
  1560. u32 lanes_prop;
  1561. mhdp = devm_kzalloc(&pdev->dev, sizeof(struct cdns_mhdp_device),
  1562. GFP_KERNEL);
  1563. if (!mhdp)
  1564. return -ENOMEM;
  1565. clk = devm_clk_get(&pdev->dev, NULL);
  1566. if (IS_ERR(clk)) {
  1567. dev_err(&pdev->dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
  1568. return PTR_ERR(clk);
  1569. }
  1570. mhdp->clk = clk;
  1571. mhdp->dev = &pdev->dev;
  1572. mutex_init(&mhdp->mbox_mutex);
  1573. spin_lock_init(&mhdp->start_lock);
  1574. dev_set_drvdata(&pdev->dev, mhdp);
  1575. drm_dp_aux_init(&mhdp->aux);
  1576. mhdp->aux.dev = &pdev->dev;
  1577. mhdp->aux.transfer = mhdp_transfer;
  1578. regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1579. mhdp->regs = devm_ioremap_resource(&pdev->dev, regs);
  1580. if (IS_ERR(mhdp->regs))
  1581. return PTR_ERR(mhdp->regs);
  1582. mhdp->phy = devm_phy_get(&pdev->dev, "dpphy");
  1583. if (IS_ERR(mhdp->phy)) {
  1584. dev_err(&pdev->dev, "no PHY configured\n");
  1585. return PTR_ERR(mhdp->phy);
  1586. }
  1587. platform_set_drvdata(pdev, mhdp);
  1588. clk_prepare_enable(clk);
  1589. match = of_match_device(mhdp_ids, &pdev->dev);
  1590. if (!match)
  1591. return -ENODEV;
  1592. mhdp->ops = (struct mhdp_platform_ops *)match->data;
  1593. pm_runtime_enable(&pdev->dev);
  1594. ret = pm_runtime_get_sync(&pdev->dev);
  1595. if (ret < 0) {
  1596. dev_err(&pdev->dev, "pm_runtime_get_sync failed\n");
  1597. pm_runtime_disable(&pdev->dev);
  1598. goto clk_disable;
  1599. }
  1600. if (mhdp->ops && mhdp->ops->init) {
  1601. ret = mhdp->ops->init(mhdp);
  1602. if (ret != 0) {
  1603. dev_err(&pdev->dev, "MHDP platform initialization failed: %d\n",
  1604. ret);
  1605. goto runtime_put;
  1606. }
  1607. }
  1608. rate = clk_get_rate(clk);
  1609. writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
  1610. writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
  1611. dev_dbg(&pdev->dev, "func clk rate %lu Hz\n", rate);
  1612. writel(~0, mhdp->regs + CDNS_MB_INT_MASK);
  1613. writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
  1614. irq = platform_get_irq(pdev, 0);
  1615. ret = devm_request_threaded_irq(mhdp->dev, irq, NULL, mhdp_irq_handler,
  1616. IRQF_ONESHOT, "mhdp8546", mhdp);
  1617. if (ret) {
  1618. dev_err(&pdev->dev, "cannot install IRQ %d\n", irq);
  1619. ret = -EIO;
  1620. goto j721e_fini;
  1621. }
  1622. /* Read source capabilities, based on PHY's device tree properties. */
  1623. ret = device_property_read_u32(&(mhdp->phy->dev), "num_lanes",
  1624. &(lanes_prop));
  1625. if (ret)
  1626. mhdp->host.lanes_cnt = CDNS_LANE_4;
  1627. else
  1628. mhdp->host.lanes_cnt = lanes_prop;
  1629. ret = device_property_read_u32(&(mhdp->phy->dev), "max_bit_rate",
  1630. &(mhdp->host.link_rate));
  1631. if (ret)
  1632. mhdp->host.link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
  1633. else
  1634. /* PHY uses Mb/s, DRM uses tens of kb/s. */
  1635. mhdp->host.link_rate *= 100;
  1636. mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
  1637. mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
  1638. mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
  1639. CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
  1640. CDNS_SUPPORT_TPS(4);
  1641. mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
  1642. mhdp->host.fast_link = false;
  1643. mhdp->host.enhanced = true;
  1644. mhdp->host.scrambler = true;
  1645. mhdp->host.ssc = false;
  1646. /* The only currently supported format */
  1647. mhdp->display_fmt.y_only = false;
  1648. mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
  1649. mhdp->display_fmt.bpc = 8;
  1650. mhdp->bridge.of_node = pdev->dev.of_node;
  1651. mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
  1652. ret = phy_init(mhdp->phy);
  1653. if (ret) {
  1654. dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
  1655. goto runtime_put;
  1656. }
  1657. drm_bridge_add(&mhdp->bridge);
  1658. ret = load_firmware(mhdp);
  1659. if (ret)
  1660. goto phy_exit;
  1661. return 0;
  1662. phy_exit:
  1663. phy_exit(mhdp->phy);
  1664. j721e_fini:
  1665. cdns_mhdp_j721e_fini(mhdp);
  1666. runtime_put:
  1667. pm_runtime_put_sync(&pdev->dev);
  1668. pm_runtime_disable(&pdev->dev);
  1669. clk_disable:
  1670. clk_disable_unprepare(mhdp->clk);
  1671. return ret;
  1672. }
/* Advertise the firmware image this module needs (FW_NAME, defined earlier
 * in this file — presumably the blob fetched by load_firmware()). */
MODULE_FIRMWARE(FW_NAME);
  1674. static int mhdp_remove(struct platform_device *pdev)
  1675. {
  1676. struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev);
  1677. unsigned int timeout = 10;
  1678. bool stop_fw = false;
  1679. int ret = 0;
  1680. if (mhdp->ops && mhdp->ops->exit)
  1681. mhdp->ops->exit(mhdp);
  1682. drm_bridge_remove(&mhdp->bridge);
  1683. wait_loading:
  1684. spin_lock(&mhdp->start_lock);
  1685. if (mhdp->hw_state == MHDP_HW_LOADING && timeout-- > 0) {
  1686. spin_unlock(&mhdp->start_lock);
  1687. msleep(100);
  1688. goto wait_loading;
  1689. } else if (mhdp->hw_state == MHDP_HW_READY) {
  1690. stop_fw = true;
  1691. timeout = 1; /* We were succesful even if counter reached 0 */
  1692. }
  1693. mhdp->hw_state = MHDP_HW_STOPPED;
  1694. spin_unlock(&mhdp->start_lock);
  1695. if (timeout == 0)
  1696. dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
  1697. __func__);
  1698. if (stop_fw) {
  1699. ret = cdns_mhdp_set_firmware_active(mhdp, false);
  1700. if (ret)
  1701. dev_err(mhdp->dev, "%s: De-activate FW failed: %d\n",
  1702. __func__, ret);
  1703. }
  1704. phy_exit(mhdp->phy);
  1705. cdns_mhdp_j721e_fini(mhdp);
  1706. pm_runtime_put_sync(&pdev->dev);
  1707. pm_runtime_disable(&pdev->dev);
  1708. clk_disable_unprepare(mhdp->clk);
  1709. /* FIXME: check for missing functions */
  1710. return ret;
  1711. }
  1712. static struct platform_driver mhdp_driver = {
  1713. .driver = {
  1714. .name = "cdns-mhdp",
  1715. .of_match_table = of_match_ptr(mhdp_ids),
  1716. },
  1717. .probe = mhdp_probe,
  1718. .remove = mhdp_remove,
  1719. };
/* Register with the platform bus; this macro also generates the module
 * init/exit boilerplate. */
module_platform_driver(mhdp_driver);

MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
MODULE_AUTHOR("Przemyslaw Gaj <pgaj@cadence.com>");
MODULE_AUTHOR("Damian Kos <dkos@cadence.com>");
MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
MODULE_DESCRIPTION("Cadence MHDP DP bridge driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-mhdp");