cdn-dp-core.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242
  1. /*
  2. * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  3. * Author: Chris Zhong <zyw@rock-chips.com>
  4. *
  5. * This software is licensed under the terms of the GNU General Public
  6. * License version 2, as published by the Free Software Foundation, and
  7. * may be copied, distributed, and modified under those terms.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <drm/drmP.h>
  15. #include <drm/drm_atomic_helper.h>
  16. #include <drm/drm_crtc_helper.h>
  17. #include <drm/drm_dp_helper.h>
  18. #include <drm/drm_edid.h>
  19. #include <drm/drm_of.h>
  20. #include <linux/clk.h>
  21. #include <linux/component.h>
  22. #include <linux/extcon.h>
  23. #include <linux/firmware.h>
  24. #include <linux/regmap.h>
  25. #include <linux/reset.h>
  26. #include <linux/mfd/syscon.h>
  27. #include <linux/phy/phy.h>
  28. #include <sound/hdmi-codec.h>
  29. #include "cdn-dp-core.h"
  30. #include "cdn-dp-reg.h"
  31. #include "rockchip_drm_vop.h"
/* Map an embedded connector/encoder back to its cdn_dp_device. */
#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

/* GRF registers and bits used to route VOP output, uPHY and HPD to DP. */
#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)	/* select the "little" VOP as source */
#define GRF_SOC_CON26		0x6268
#define UPHY_SEL_BIT		3
#define UPHY_SEL_MASK		BIT(19)
#define DPTX_HPD_SEL		(3 << 12)	/* route HPD to the DP controller */
#define DPTX_HPD_DEL		(2 << 12)	/* deselect HPD from the DP controller */
#define DPTX_HPD_SEL_MASK	(3 << 28)

/* Upper bounds for firmware loading and DPCD sink polling. */
#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"
  47. struct cdn_dp_data {
  48. u8 max_phy;
  49. };
  50. struct cdn_dp_data rk3399_cdn_dp = {
  51. .max_phy = 2,
  52. };
/* Devicetree match table; .data carries the per-SoC cdn_dp_data. */
static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
		.data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);
  59. static int cdn_dp_grf_write(struct cdn_dp_device *dp,
  60. unsigned int reg, unsigned int val)
  61. {
  62. int ret;
  63. ret = clk_prepare_enable(dp->grf_clk);
  64. if (ret) {
  65. DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
  66. return ret;
  67. }
  68. ret = regmap_write(dp->grf, reg, val);
  69. if (ret) {
  70. DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
  71. return ret;
  72. }
  73. clk_disable_unprepare(dp->grf_clk);
  74. return 0;
  75. }
  76. static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
  77. {
  78. int ret;
  79. unsigned long rate;
  80. ret = clk_prepare_enable(dp->pclk);
  81. if (ret < 0) {
  82. DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
  83. goto err_pclk;
  84. }
  85. ret = clk_prepare_enable(dp->core_clk);
  86. if (ret < 0) {
  87. DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
  88. goto err_core_clk;
  89. }
  90. ret = pm_runtime_get_sync(dp->dev);
  91. if (ret < 0) {
  92. DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
  93. goto err_pm_runtime_get;
  94. }
  95. reset_control_assert(dp->core_rst);
  96. reset_control_assert(dp->dptx_rst);
  97. reset_control_assert(dp->apb_rst);
  98. reset_control_deassert(dp->core_rst);
  99. reset_control_deassert(dp->dptx_rst);
  100. reset_control_deassert(dp->apb_rst);
  101. rate = clk_get_rate(dp->core_clk);
  102. if (!rate) {
  103. DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
  104. ret = -EINVAL;
  105. goto err_set_rate;
  106. }
  107. cdn_dp_set_fw_clk(dp, rate);
  108. cdn_dp_clock_reset(dp);
  109. return 0;
  110. err_set_rate:
  111. pm_runtime_put(dp->dev);
  112. err_pm_runtime_get:
  113. clk_disable_unprepare(dp->core_clk);
  114. err_core_clk:
  115. clk_disable_unprepare(dp->pclk);
  116. err_pclk:
  117. return ret;
  118. }
  119. static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
  120. {
  121. pm_runtime_put_sync(dp->dev);
  122. clk_disable_unprepare(dp->pclk);
  123. clk_disable_unprepare(dp->core_clk);
  124. }
  125. static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
  126. {
  127. struct extcon_dev *edev = port->extcon;
  128. union extcon_property_value property;
  129. int dptx;
  130. u8 lanes;
  131. dptx = extcon_get_state(edev, EXTCON_DISP_DP);
  132. if (dptx > 0) {
  133. extcon_get_property(edev, EXTCON_DISP_DP,
  134. EXTCON_PROP_USB_SS, &property);
  135. if (property.intval)
  136. lanes = 2;
  137. else
  138. lanes = 4;
  139. } else {
  140. lanes = 0;
  141. }
  142. return lanes;
  143. }
  144. static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
  145. {
  146. int ret;
  147. u8 value;
  148. *sink_count = 0;
  149. ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
  150. if (ret)
  151. return ret;
  152. *sink_count = DP_GET_SINK_COUNT(value);
  153. return 0;
  154. }
  155. static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
  156. {
  157. struct cdn_dp_port *port;
  158. int i, lanes;
  159. for (i = 0; i < dp->ports; i++) {
  160. port = dp->port[i];
  161. lanes = cdn_dp_get_port_lanes(port);
  162. if (lanes)
  163. return port;
  164. }
  165. return NULL;
  166. }
  167. static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
  168. {
  169. unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
  170. struct cdn_dp_port *port;
  171. u8 sink_count = 0;
  172. if (dp->active_port < 0 || dp->active_port >= dp->ports) {
  173. DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
  174. return false;
  175. }
  176. port = dp->port[dp->active_port];
  177. /*
  178. * Attempt to read sink count, retry in case the sink may not be ready.
  179. *
  180. * Sinks are *supposed* to come up within 1ms from an off state, but
  181. * some docks need more time to power up.
  182. */
  183. while (time_before(jiffies, timeout)) {
  184. if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
  185. return false;
  186. if (!cdn_dp_get_sink_count(dp, &sink_count))
  187. return sink_count ? true : false;
  188. usleep_range(5000, 10000);
  189. }
  190. DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
  191. return false;
  192. }
  193. static enum drm_connector_status
  194. cdn_dp_connector_detect(struct drm_connector *connector, bool force)
  195. {
  196. struct cdn_dp_device *dp = connector_to_dp(connector);
  197. enum drm_connector_status status = connector_status_disconnected;
  198. mutex_lock(&dp->lock);
  199. if (dp->connected)
  200. status = connector_status_connected;
  201. mutex_unlock(&dp->lock);
  202. return status;
  203. }
/* drm_connector_funcs.destroy: unregister, then free DRM resources. */
static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
/* Connector ops; atomic state handling uses the stock DRM helpers. */
static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
  217. static int cdn_dp_connector_get_modes(struct drm_connector *connector)
  218. {
  219. struct cdn_dp_device *dp = connector_to_dp(connector);
  220. struct edid *edid;
  221. int ret = 0;
  222. mutex_lock(&dp->lock);
  223. edid = dp->edid;
  224. if (edid) {
  225. DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
  226. edid->width_cm, edid->height_cm);
  227. dp->sink_has_audio = drm_detect_monitor_audio(edid);
  228. ret = drm_add_edid_modes(connector, edid);
  229. if (ret)
  230. drm_mode_connector_update_edid_property(connector,
  231. edid);
  232. }
  233. mutex_unlock(&dp->lock);
  234. return ret;
  235. }
  236. static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
  237. struct drm_display_mode *mode)
  238. {
  239. struct cdn_dp_device *dp = connector_to_dp(connector);
  240. struct drm_display_info *display_info = &dp->connector.display_info;
  241. u32 requested, actual, rate, sink_max, source_max = 0;
  242. u8 lanes, bpc;
  243. /* If DP is disconnected, every mode is invalid */
  244. if (!dp->connected)
  245. return MODE_BAD;
  246. switch (display_info->bpc) {
  247. case 10:
  248. bpc = 10;
  249. break;
  250. case 6:
  251. bpc = 6;
  252. break;
  253. default:
  254. bpc = 8;
  255. break;
  256. }
  257. requested = mode->clock * bpc * 3 / 1000;
  258. source_max = dp->lanes;
  259. sink_max = drm_dp_max_lane_count(dp->dpcd);
  260. lanes = min(source_max, sink_max);
  261. source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
  262. sink_max = drm_dp_max_link_rate(dp->dpcd);
  263. rate = min(source_max, sink_max);
  264. actual = rate * lanes / 100;
  265. /* efficiency is about 0.8 */
  266. actual = actual * 8 / 10;
  267. if (requested > actual) {
  268. DRM_DEV_DEBUG_KMS(dp->dev,
  269. "requested=%d, actual=%d, clock=%d\n",
  270. requested, actual, mode->clock);
  271. return MODE_CLOCK_HIGH;
  272. }
  273. return MODE_OK;
  274. }
  275. static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
  276. .get_modes = cdn_dp_connector_get_modes,
  277. .mode_valid = cdn_dp_connector_mode_valid,
  278. };
/*
 * Validate the firmware blob already held in dp->fw, upload its IRAM
 * and DRAM sections to the controller, start the microcontroller and
 * enable event reporting.
 */
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	/* The header's own size field must match the blob we were given. */
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	/*
	 * IRAM section follows the header, DRAM section follows IRAM.
	 * NOTE(review): header_size/iram_size are used without
	 * le32_to_cpu(), unlike size_bytes above — verify on big-endian.
	 */
	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}
  303. static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
  304. {
  305. int ret;
  306. if (!cdn_dp_check_sink_connection(dp))
  307. return -ENODEV;
  308. ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
  309. DP_RECEIVER_CAP_SIZE);
  310. if (ret) {
  311. DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
  312. return ret;
  313. }
  314. kfree(dp->edid);
  315. dp->edid = drm_do_get_edid(&dp->connector,
  316. cdn_dp_get_edid_block, dp);
  317. return 0;
  318. }
/*
 * Route @port's uPHY to the DP controller, power the PHY on, select DP
 * as the HPD source, and program the host capabilities for the lane
 * count / Type-C orientation reported by extcon.  On success the port
 * becomes the active port.  On failure the PHY is powered back down
 * and HPD routing is restored to the deselected state.
 */
static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	/* Route this port's uPHY to the DP controller. */
	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK);
	if (ret)
		return ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	/* Select the DP controller as the HPD source. */
	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	/* Returns <= 0 when no hot-plug is asserted (0) or on error (<0). */
	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	/* Type-C plug orientation is needed to configure the host lanes. */
	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	/* Deselect DP as the HPD source again on failure. */
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}
  373. static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
  374. struct cdn_dp_port *port)
  375. {
  376. int ret;
  377. if (port->phy_enabled) {
  378. ret = phy_power_off(port->phy);
  379. if (ret) {
  380. DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
  381. return ret;
  382. }
  383. }
  384. port->phy_enabled = false;
  385. port->lanes = 0;
  386. dp->active_port = -1;
  387. return 0;
  388. }
/*
 * Power the DP block down: turn off every port PHY, deselect DP as the
 * HPD source, stop the firmware and release clocks / runtime PM.  The
 * cached EDID is kept while a sink is still connected so a re-enable
 * can skip re-reading it.  Called with dp->lock held.
 */
static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	/* Force a fresh link training on the next enable. */
	dp->link.rate = 0;
	dp->link.num_lanes = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}
/*
 * Bring the DP block up: enable clocks/PM, boot the firmware, then try
 * each connected port (starting from the first connected one) until a
 * PHY powers up and the sink's capabilities can be read.  Called with
 * dp->lock held.
 */
static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	/* Already up: nothing to do. */
	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* only enable the port that connected with downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				/* Sink unreachable on this port; try the next. */
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}
  456. static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
  457. struct drm_display_mode *mode,
  458. struct drm_display_mode *adjusted)
  459. {
  460. struct cdn_dp_device *dp = encoder_to_dp(encoder);
  461. struct drm_display_info *display_info = &dp->connector.display_info;
  462. struct video_info *video = &dp->video_info;
  463. switch (display_info->bpc) {
  464. case 10:
  465. video->color_depth = 10;
  466. break;
  467. case 6:
  468. video->color_depth = 6;
  469. break;
  470. default:
  471. video->color_depth = 8;
  472. break;
  473. }
  474. video->color_fmt = PXL_RGB;
  475. video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
  476. video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
  477. memcpy(&dp->mode, adjusted, sizeof(*mode));
  478. }
  479. static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
  480. {
  481. u8 link_status[DP_LINK_STATUS_SIZE];
  482. struct cdn_dp_port *port = cdn_dp_connected_port(dp);
  483. u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);
  484. if (!port || !dp->link.rate || !dp->link.num_lanes)
  485. return false;
  486. if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
  487. DP_LINK_STATUS_SIZE)) {
  488. DRM_ERROR("Failed to get link status\n");
  489. return false;
  490. }
  491. /* if link training is requested we should perform it always */
  492. return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
  493. }
/*
 * drm_encoder_helper_funcs.enable: route the active VOP to the DP
 * controller via GRF, power the block up, (re)train the link if
 * needed, then configure and start the video stream.
 */
static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	/* Which VOP drives this encoder: 0 = "big", 1 = "little". */
	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	/* Upper 16 bits are the GRF write-enable mask for the bit. */
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	/* Retrain only when the existing link no longer passes EQ. */
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	/* Idle the stream while the video path is reconfigured. */
	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&dp->lock);
}
/*
 * drm_encoder_helper_funcs.disable: power the block down, then kick
 * the event worker when a port is still physically connected so it can
 * bring the link back up (see the comment below).
 */
static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following 2 cases, we need to run the event_work to re-enable
	 * the DP:
	 * 1. If there is not just one port device is connected, and remove one
	 *    device from a port, the DP will be disabled here, at this case,
	 *    run the event_work to re-open DP for the other port.
	 * 2. If re-training or re-config failed, the DP will be disabled here.
	 *    run the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}
  569. static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
  570. struct drm_crtc_state *crtc_state,
  571. struct drm_connector_state *conn_state)
  572. {
  573. struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
  574. s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
  575. s->output_type = DRM_MODE_CONNECTOR_DisplayPort;
  576. return 0;
  577. }
/* Encoder helper ops wired into the DRM atomic pipeline. */
static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

/* Encoder teardown only needs the stock cleanup helper. */
static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};
/*
 * Fetch every devicetree-described resource the driver needs: the GRF
 * syscon, the register block, four clocks (core/pclk/spdif/grf) and
 * four reset lines (spdif/dptx/core/apb).  All resources are
 * devm-managed; returns a negative errno if any lookup fails.
 */
static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}
  646. static int cdn_dp_audio_hw_params(struct device *dev, void *data,
  647. struct hdmi_codec_daifmt *daifmt,
  648. struct hdmi_codec_params *params)
  649. {
  650. struct cdn_dp_device *dp = dev_get_drvdata(dev);
  651. struct audio_info audio = {
  652. .sample_width = params->sample_width,
  653. .sample_rate = params->sample_rate,
  654. .channels = params->channels,
  655. };
  656. int ret;
  657. mutex_lock(&dp->lock);
  658. if (!dp->active) {
  659. ret = -ENODEV;
  660. goto out;
  661. }
  662. switch (daifmt->fmt) {
  663. case HDMI_I2S:
  664. audio.format = AFMT_I2S;
  665. break;
  666. case HDMI_SPDIF:
  667. audio.format = AFMT_SPDIF;
  668. break;
  669. default:
  670. DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
  671. ret = -EINVAL;
  672. goto out;
  673. }
  674. ret = cdn_dp_audio_config(dp, &audio);
  675. if (!ret)
  676. dp->audio_info = audio;
  677. out:
  678. mutex_unlock(&dp->lock);
  679. return ret;
  680. }
  681. static void cdn_dp_audio_shutdown(struct device *dev, void *data)
  682. {
  683. struct cdn_dp_device *dp = dev_get_drvdata(dev);
  684. int ret;
  685. mutex_lock(&dp->lock);
  686. if (!dp->active)
  687. goto out;
  688. ret = cdn_dp_audio_stop(dp, &dp->audio_info);
  689. if (!ret)
  690. dp->audio_info.format = AFMT_UNUSED;
  691. out:
  692. mutex_unlock(&dp->lock);
  693. }
  694. static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
  695. bool enable)
  696. {
  697. struct cdn_dp_device *dp = dev_get_drvdata(dev);
  698. int ret;
  699. mutex_lock(&dp->lock);
  700. if (!dp->active) {
  701. ret = -ENODEV;
  702. goto out;
  703. }
  704. ret = cdn_dp_audio_mute(dp, enable);
  705. out:
  706. mutex_unlock(&dp->lock);
  707. return ret;
  708. }
  709. static int cdn_dp_audio_get_eld(struct device *dev, void *data,
  710. u8 *buf, size_t len)
  711. {
  712. struct cdn_dp_device *dp = dev_get_drvdata(dev);
  713. memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));
  714. return 0;
  715. }
/* Callbacks handed to the hdmi-codec ALSA glue driver. */
static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.digital_mute = cdn_dp_audio_digital_mute,
	.get_eld = cdn_dp_audio_get_eld,
};
  722. static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
  723. struct device *dev)
  724. {
  725. struct hdmi_codec_pdata codec_data = {
  726. .i2s = 1,
  727. .spdif = 1,
  728. .ops = &audio_codec_ops,
  729. .max_i2s_channels = 8,
  730. };
  731. dp->audio_pdev = platform_device_register_data(
  732. dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
  733. &codec_data, sizeof(codec_data));
  734. return PTR_ERR_OR_ZERO(dp->audio_pdev);
  735. }
/*
 * Fetch the controller firmware, retrying with exponential backoff for
 * up to CDN_FW_TIMEOUT_MS while the firmware file is not yet available
 * (e.g. rootfs not mounted during early boot).  Must be entered with
 * dp->lock held; the lock is dropped while waiting and re-taken before
 * returning.
 */
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			/* File not there yet: back off and retry. */
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}
/*
 * Deferred hot-plug handler: reconcile the block's state with what
 * extcon reports (enable, retrain, or mark disconnected), then fire a
 * DRM hotplug event if the connector status changed.
 */
static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;

	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->link.rate;
		unsigned int lanes = dp->link.num_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	/* Re-run detect() outside the lock and notify on any change. */
	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}
/*
 * Extcon notifier callback for a port: defers all handling to
 * cdn_dp_pd_event_work() because the real work can sleep.
 */
static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}
/*
 * Component bind callback: attach this DP device to the master DRM device.
 *
 * Parses the DT, initializes the encoder and connector, attaches them, and
 * registers an EXTCON_DISP_DP notifier on every discovered port so cable
 * events reach cdn_dp_pd_event().  Finally kicks the event worker once in
 * case a cable is already plugged in at bind time.
 *
 * Returns 0 on success or a negative errno; on failure the DRM objects
 * created here are cleaned up via the goto unwind path.
 */
static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	/* Start from a known-idle state; hotplug events update it later. */
	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	/* Hotplug is event driven through extcon, not polled. */
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_mode_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	/*
	 * Register for DisplayPort cable notifications on every port found
	 * by cdn_dp_probe().  devm_ unregisters these automatically.
	 */
	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	/* Process any cable state that existed before we registered. */
	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}
/*
 * Component unbind callback: undo cdn_dp_bind().
 *
 * The event worker is cancelled first so it cannot race with the teardown
 * below; the encoder is disabled before the DRM objects are destroyed.
 */
static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	/* kfree(NULL) is a no-op, so this is safe if no EDID was cached. */
	kfree(dp->edid);
	dp->edid = NULL;
}
/* Component framework glue: bind/unbind against the master DRM device. */
static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};
  926. int cdn_dp_suspend(struct device *dev)
  927. {
  928. struct cdn_dp_device *dp = dev_get_drvdata(dev);
  929. int ret = 0;
  930. mutex_lock(&dp->lock);
  931. if (dp->active)
  932. ret = cdn_dp_disable(dp);
  933. dp->suspended = true;
  934. mutex_unlock(&dp->lock);
  935. return ret;
  936. }
/*
 * System resume hook: clear the suspended flag and, if the firmware had
 * already been loaded, kick the event worker to re-detect the cable state
 * and bring the block back up.  Both steps happen under dp->lock so they
 * cannot interleave with cdn_dp_suspend().
 */
int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}
/*
 * Platform probe: allocate the device state, discover up to max_phy
 * (extcon, phy) port pairs from DT, and hand off to the component
 * framework.  All allocations are devm-managed.
 *
 * Returns 0 on success, -EPROBE_DEFER if an extcon or phy provider is not
 * ready yet, -ENOMEM on allocation failure, or -EINVAL if no usable port
 * was found.
 */
static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	/* Probe only runs on a matched node, so match->data is valid here. */
	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		/* Retry the whole probe if either provider is not ready. */
		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		/* Ports are optional: skip indices missing extcon or phy. */
		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	/*
	 * NOTE(review): if component_add() fails, the audio platform device
	 * registered just above is never unregistered — consider an error
	 * path that undoes cdn_dp_audio_codec_init() on failure.
	 */
	return component_add(dev, &cdn_dp_component_ops);
}
/*
 * Platform remove: unregister the audio codec device, power the block
 * down via the suspend path, then detach from the component framework
 * (which triggers cdn_dp_unbind()).
 */
static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}
/* Power the controller down cleanly on system shutdown/reboot. */
static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}
/* Wire system sleep transitions to cdn_dp_suspend()/cdn_dp_resume(). */
static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};
  1006. struct platform_driver cdn_dp_driver = {
  1007. .probe = cdn_dp_probe,
  1008. .remove = cdn_dp_remove,
  1009. .shutdown = cdn_dp_shutdown,
  1010. .driver = {
  1011. .name = "cdn-dp",
  1012. .owner = THIS_MODULE,
  1013. .of_match_table = of_match_ptr(cdn_dp_dt_ids),
  1014. .pm = &cdn_dp_pm_ops,
  1015. },
  1016. };