cdn-dp-core.c

/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>

#include <sound/hdmi-codec.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define UPHY_SEL_BIT		3
#define UPHY_SEL_MASK		BIT(19)
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"

struct cdn_dp_data {
	u8 max_phy;
};

struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
		.data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);

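/*
 * Helper for GRF writes: grf_clk is enabled for the duration of the regmap
 * access and released again once the write has completed or failed.
 */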
static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}

static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}

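/*
 * Report how many DP lanes a port can use: two when the extcon reports USB
 * SuperSpeed is carried on the same connector, four otherwise, and zero
 * when nothing is connected.
 */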
static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}
	return NULL;
}

static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);
		ret = drm_add_edid_modes(connector, edid);
		if (ret) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		}
	}
	mutex_unlock(&dp->lock);

	return ret;
}

static struct drm_encoder *
cdn_dp_connector_best_encoder(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);

	return &dp->encoder;
}

static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
				       struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.best_encoder = cdn_dp_connector_best_encoder,
	.mode_valid = cdn_dp_connector_mode_valid,
};

static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}

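/*
 * Bring up the PHY for one port: route the uPHY to this port through the
 * GRF, power on the PHY, select DPTX HPD, then check the HPD state and
 * program the host capabilities (lane count and Type-C plug orientation).
 */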
static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK);
	if (ret)
		return ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

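/*
 * Power down every port PHY, deselect DPTX HPD in the GRF, stop the
 * firmware and gate the clocks. The cached EDID is only freed when no
 * sink is connected anymore, so a later re-enable can reuse it.
 */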
static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->link.rate = 0;
	dp->link.num_lanes = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}

static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* only enable the port that is connected to a downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}

static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	memcpy(&dp->mode, adjusted, sizeof(*mode));
}

static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->link.rate || !dp->link.num_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}

static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}

out:
	mutex_unlock(&dp->lock);
}

static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following 2 cases, we need to run the event_work to re-enable
	 * the DP:
	 * 1. If more than one port device is connected and one device is
	 *    removed from a port, the DP will be disabled here; in this case,
	 *    run the event_work to re-open DP for the other port.
	 * 2. If re-training or re-config failed, the DP will be disabled here;
	 *    run the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}

static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}

static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
				     bool enable)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.digital_mute = cdn_dp_audio_digital_mute,
	.get_eld = cdn_dp_audio_get_eld,
};

static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}

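/*
 * Load the Cadence DP firmware, retrying with exponential backoff for up to
 * CDN_FW_TIMEOUT_MS in case the firmware file is not available yet early in
 * boot. The device lock is dropped for the duration of the request.
 */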
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}

static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;
	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->link.rate;
		unsigned int lanes = dp->link.num_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}

static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_mode_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};

int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}

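/*
 * Probe gathers up to max_phy extcon/PHY pairs into ports, registers the
 * HDMI audio codec device and then hands the DRM-facing setup to the
 * component framework via cdn_dp_bind().
 */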
static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	return component_add(dev, &cdn_dp_component_ops);
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		.name = "cdn-dp",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(cdn_dp_dt_ids),
		.pm = &cdn_dp_pm_ops,
	},
};