/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>

#include <sound/hdmi-codec.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"

struct cdn_dp_data {
	u8 max_phy;
};

struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
		.data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);

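/*
 * The GRF (general register files) is accessed through a syscon regmap;
 * the grf clock is enabled around each write so the access takes effect.
 */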
static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}

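/*
 * Enable the controller clocks and runtime PM, pulse the core/dptx/apb
 * resets, then hand the core clock rate to the firmware and reset its
 * clock domains.
 */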
static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}

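/*
 * A Type-C port carries 2 DP lanes when SuperSpeed USB is active on the
 * same connector and all 4 lanes otherwise; 0 means nothing is connected.
 */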
static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

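/* Return the first port whose extcon reports an active DP connection. */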
static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}
	return NULL;
}

static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);
		ret = drm_add_edid_modes(connector, edid);
		if (ret)
			drm_connector_update_edid_property(connector,
							   edid);
	}
	mutex_unlock(&dp->lock);

	return ret;
}

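/*
 * Reject modes whose payload (pixel clock * bpc * 3 colour components)
 * exceeds roughly 80% of the negotiated link bandwidth (lanes * rate).
 */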
static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
				       struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.mode_valid = cdn_dp_connector_mode_valid,
};

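/*
 * Validate the firmware header, load the IRAM and DRAM sections into the
 * controller, start the embedded microcontroller and enable its event
 * reporting.
 */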
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}

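/*
 * Power on the Type-C PHY for this port, route HPD to the DP controller
 * through the GRF, and program the host capabilities (lane count and plug
 * orientation) once HPD is seen.
 */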
static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->link.rate = 0;
	dp->link.num_lanes = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}

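/*
 * Bring up clocks and firmware, then walk the connected ports and enable
 * the first one whose sink capabilities can be read successfully.
 */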
static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* only enable the port that connected with downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}

static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	memcpy(&dp->mode, adjusted, sizeof(*mode));
}

static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->link.rate || !dp->link.num_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}

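/*
 * Select which VOP feeds the DP controller via GRF_SOC_CON9, enable the
 * controller, retrain the link if needed and switch the video stream from
 * idle to valid.
 */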
static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&dp->lock);
}

static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following two cases we need to run the event_work to
	 * re-enable DP:
	 * 1. More than one port device is connected and one of them is
	 *    removed: DP gets disabled here, so run the event_work to
	 *    re-open DP for the remaining port.
	 * 2. Re-training or re-configuration failed and DP was disabled
	 *    here: run the event_work to reconnect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}

static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}

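/*
 * hdmi-codec callbacks: audio can only be configured while the DP block is
 * active, so each callback checks dp->active under dp->lock.
 */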
static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
				     bool enable)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.digital_mute = cdn_dp_audio_digital_mute,
	.get_eld = cdn_dp_audio_get_eld,
};

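/* Register an hdmi-codec platform device so ALSA can stream audio over DP. */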
static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}

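/*
 * The firmware usually lives on the rootfs, which may not be mounted when
 * the first hotplug event arrives, so retry with an increasing delay and
 * drop dp->lock while waiting.
 */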
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}

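/*
 * Hotplug worker: (re)load the firmware if necessary, then decide whether
 * the connector is really connected, enable the block or retrain the link,
 * and finally report any status change to userspace.
 */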
static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;

	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->link.rate;
		unsigned int lanes = dp->link.num_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}

static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};

int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}

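/*
 * Probe collects up to max_phy extcon/PHY pairs (one per Type-C port),
 * deferring if either provider is not ready yet, then registers the audio
 * codec and the DRM component.
 */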
static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	return component_add(dev, &cdn_dp_component_ops);
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		   .name = "cdn-dp",
		   .owner = THIS_MODULE,
		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
		   .pm = &cdn_dp_pm_ops,
	},
};