cdn-dp-core.c

/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>

#include <sound/hdmi-codec.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define UPHY_SEL_BIT		3
#define UPHY_SEL_MASK		BIT(19)
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"

struct cdn_dp_data {
	u8 max_phy;
};

struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
		.data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);
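
/*
 * Write a GRF (general register file) register, keeping the GRF clock
 * enabled only for the duration of the access.
 */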
static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}
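
/*
 * Enable the controller clocks, take a runtime PM reference and pulse the
 * core/dptx/apb resets, then hand the core clock rate to the firmware.
 */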
static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}
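
/*
 * A Type-C port that is also carrying USB SuperSpeed data leaves only two
 * lanes for DisplayPort; otherwise all four lanes are available. Zero lanes
 * means nothing is plugged into this port.
 */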
static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}

	return NULL;
}
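
/*
 * Poll DP_SINK_COUNT on the active port until the sink reports at least one
 * device or CDN_DPCD_TIMEOUT_MS expires; bail out early if the extcon cable
 * state drops.
 */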
static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);
		ret = drm_add_edid_modes(connector, edid);
		if (ret) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		}
	}
	mutex_unlock(&dp->lock);

	return ret;
}

static struct drm_encoder *
cdn_dp_connector_best_encoder(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);

	return &dp->encoder;
}
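
/*
 * Reject any mode whose payload (pixel clock * bpc * 3 colour components)
 * exceeds roughly 80% of the negotiated link bandwidth (link rate * lane
 * count); the 0.8 factor approximates the coding overhead on the link.
 */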
static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
				       struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.best_encoder = cdn_dp_connector_best_encoder,
	.mode_valid = cdn_dp_connector_mode_valid,
};
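
/*
 * Validate the firmware image against its header, push the IRAM and DRAM
 * sections to the controller, start the embedded microcontroller and set up
 * event reporting.
 */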
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}
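
/*
 * Route the selected port's PHY to the DP controller via GRF, power the PHY
 * on, confirm HPD is asserted, then program the host capabilities (lane
 * count and Type-C plug orientation) into the firmware.
 */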
static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK);
	if (ret)
		return ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->link.rate = 0;
	dp->link.num_lanes = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}
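
/*
 * Bring up clocks and firmware, then walk the ports starting from the first
 * connected one and enable the first port whose PHY powers up and whose sink
 * capabilities can be read.
 */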
static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* only enable the port that is connected to a downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}
static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	memcpy(&dp->mode, adjusted, sizeof(*mode));
}
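
/*
 * Read the DPCD lane status registers and report whether channel
 * equalization still holds on the currently trained lanes; a false return
 * means the link needs (re-)training.
 */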
static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->link.rate || !dp->link.num_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}
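
/*
 * Encoder enable: select the VOP (big or little) driving this encoder via
 * GRF_SOC_CON9, power up the DP path, retrain the link if needed and only
 * then switch the video stream from IDLE to VALID.
 */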
static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&dp->lock);
}
static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following 2 cases, we need to run the event_work to re-enable
	 * the DP:
	 * 1. If more than one port device is connected and one of them is
	 *    removed, the DP will be disabled here; in this case, run the
	 *    event_work to re-open DP for the other port.
	 * 2. If re-training or re-config failed, the DP will be disabled here;
	 *    run the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}
static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}
static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
				     bool enable)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.digital_mute = cdn_dp_audio_digital_mute,
	.get_eld = cdn_dp_audio_get_eld,
};

static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}
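
/*
 * The firmware blob (CDN_DP_FIRMWARE, normally shipped by the distribution's
 * linux-firmware package) may not be readable yet when the first hotplug
 * event arrives during boot, so retry request_firmware() with exponential
 * backoff for up to CDN_FW_TIMEOUT_MS, dropping dp->lock while waiting.
 */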
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}
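
/*
 * Deferred hotplug handler: re-evaluate the extcon, sink and link state,
 * enable or retrain the DP as required, then report any connector status
 * change to userspace via a hotplug event.
 */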
static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;
	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->link.rate;
		unsigned int lanes = dp->link.num_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}
static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_mode_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};

int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}
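
/*
 * Probe walks up to max_phy extcon/PHY phandle pairs from the device tree;
 * a pair that is still probing defers the whole driver, while a missing
 * pair is simply skipped, so only fully described ports are registered.
 */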
static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	return component_add(dev, &cdn_dp_component_ops);
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		   .name = "cdn-dp",
		   .owner = THIS_MODULE,
		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
		   .pm = &cdn_dp_pm_ops,
	},
};