cdn-dp-core.c

/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>

#include <sound/hdmi-codec.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define UPHY_SEL_BIT		3
#define UPHY_SEL_MASK		BIT(19)
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"

struct cdn_dp_data {
	u8 max_phy;
};

struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
		.data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);

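/* Write a GRF register with the GRF clock temporarily enabled. */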
static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}

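/*
 * Bring up the controller: enable pclk/core_clk and runtime PM, pulse the
 * core/dptx/apb resets, then tell the firmware block the core clock rate.
 */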
static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}

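/*
 * Number of DP lanes available on a Type-C port: 4 when the whole cable
 * carries DP, 2 when it is shared with USB SuperSpeed, 0 when nothing
 * DP-capable is attached.
 */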
static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

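/* Read DP_SINK_COUNT from the DPCD; zero means no downstream sink is present. */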
static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}
	return NULL;
}

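/*
 * Poll the sink count on the active port until the sink reports at least
 * one device or CDN_DPCD_TIMEOUT_MS elapses.
 */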
static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);
		ret = drm_add_edid_modes(connector, edid);
		if (ret) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		}
	}
	mutex_unlock(&dp->lock);

	return ret;
}

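/*
 * A mode is valid only if its required bandwidth (pixel clock x bits per
 * pixel) fits into the negotiated link: lanes x per-lane rate, derated by
 * 0.8 for 8b/10b channel coding.
 */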
static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
				       struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.mode_valid = cdn_dp_connector_mode_valid,
};

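/*
 * Validate the firmware image against its header, load the IRAM and DRAM
 * sections into the controller, start the microcontroller and enable its
 * event reporting.
 */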
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}

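/*
 * Attach the DP controller to the given Type-C PHY: select the PHY in the
 * GRF, power it on, route HPD to the controller, then program the host
 * capabilities (lane count and plug orientation) once HPD is asserted.
 */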
static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK);
	if (ret)
		return ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->link.rate = 0;
	dp->link.num_lanes = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}

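/*
 * Power up the controller, load the firmware and bring up the first
 * connected port that yields a usable sink.
 */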
static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* Only enable the port that is connected to a downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}

static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	memcpy(&dp->mode, adjusted, sizeof(*mode));
}

static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->link.rate || !dp->link.num_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}

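/*
 * Route the active VOP to the DP controller via the GRF, enable the
 * controller, (re)train the link if channel equalization is not OK, and
 * finally switch the video stream from idle to valid.
 */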
static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&dp->lock);
}

static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * Run the event_work to re-enable the DP in two cases:
	 * 1. More than one port device was connected and one of them has been
	 *    removed: the DP is disabled here, so run the event_work to
	 *    re-open DP for the remaining port.
	 * 2. Re-training or re-config failed: the DP is disabled here, so run
	 *    the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}

static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}

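/*
 * Audio is exposed through the generic hdmi-codec ASoC driver; the
 * callbacks below translate its requests into DP controller commands.
 */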
static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
				     bool enable)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);
out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.digital_mute = cdn_dp_audio_digital_mute,
	.get_eld = cdn_dp_audio_get_eld,
};

static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}

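/*
 * Wait up to CDN_FW_TIMEOUT_MS for the firmware file to show up (it may
 * live on a rootfs that is not mounted yet at boot), backing off
 * exponentially between attempts. dp->lock is dropped while waiting.
 */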
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}

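/*
 * Hotplug worker: make sure the firmware is loaded, then reconcile the
 * connected/active state with what extcon and the sink report, and send a
 * hotplug event to userspace if the connector status changed.
 */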
static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;
	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->link.rate;
		unsigned int lanes = dp->link.num_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}

static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

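/*
 * Component bind: create the DRM encoder and connector, register an extcon
 * notifier per port and kick the event worker once so a display that is
 * already plugged in gets detected.
 */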
static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_mode_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};

int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}

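/*
 * Probe: gather up to max_phy extcon/PHY pairs (one per Type-C port),
 * register the hdmi-codec audio device and hand the rest of the setup to
 * the component framework via cdn_dp_bind().
 */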
static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	return component_add(dev, &cdn_dp_component_ops);
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		   .name = "cdn-dp",
		   .owner = THIS_MODULE,
		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
		   .pm = &cdn_dp_pm_ops,
	},
};