intel_hdcp.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807
  1. /* SPDX-License-Identifier: MIT */
  2. /*
  3. * Copyright (C) 2017 Google, Inc.
  4. *
  5. * Authors:
  6. * Sean Paul <seanpaul@chromium.org>
  7. */
  8. #include <drm/drmP.h>
  9. #include <drm/drm_hdcp.h>
  10. #include <linux/i2c.h>
  11. #include <linux/random.h>
  12. #include "intel_drv.h"
  13. #include "i915_reg.h"
  14. #define KEY_LOAD_TRIES 5
/*
 * Poll the receiver for KSV-list readiness via the port's shim.
 *
 * Returns 0 once the receiver reports its KSV FIFO ready, the shim's
 * error code if the readiness read itself fails, or -ETIMEDOUT if the
 * list never becomes ready within the 5 second budget.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* The wait terminated because the read failed, not because ready. */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
  33. static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
  34. {
  35. I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
  36. I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
  37. HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
  38. }
/*
 * Load the HDCP 1.4 keys into hardware.
 *
 * Returns 0 on success (including when the keys are already loaded and
 * valid), -ENXIO when the load fails or when HSW/BDW hardware did not
 * pre-load the keys, or a pcode error code on SKL/KBL.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if the keys are already loaded and validated. */
	val = I915_READ(HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL
	 * differ in the key load trigger process from other platforms.
	 */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		/* SKL/KBL: ask pcode (the PCU firmware) to load the keys. */
		mutex_lock(&dev_priv->pcu_lock);
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		mutex_unlock(&dev_priv->pcu_lock);
		if (ret) {
			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
				  ret);
			return ret;
		}
	} else {
		/* Other SW-load platforms trigger via the config register. */
		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
  84. /* Returns updated SHA-1 index */
  85. static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
  86. {
  87. I915_WRITE(HDCP_SHA_TEXT, sha_text);
  88. if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
  89. HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
  90. DRM_ERROR("Timed out waiting for SHA1 ready\n");
  91. return -ETIMEDOUT;
  92. }
  93. return 0;
  94. }
  95. static
  96. u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
  97. {
  98. enum port port = intel_dig_port->base.port;
  99. switch (port) {
  100. case PORT_A:
  101. return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
  102. case PORT_B:
  103. return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
  104. case PORT_C:
  105. return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
  106. case PORT_D:
  107. return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
  108. case PORT_E:
  109. return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
  110. default:
  111. break;
  112. }
  113. DRM_ERROR("Unknown port %d\n", port);
  114. return -EINVAL;
  115. }
  116. static
  117. bool intel_hdcp_is_ksv_valid(u8 *ksv)
  118. {
  119. int i, ones = 0;
  120. /* KSV has 20 1's and 20 0's */
  121. for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
  122. ones += hweight8(ksv[i]);
  123. if (ones != 20)
  124. return false;
  125. return true;
  126. }
  127. /* Implements Part 2 of the HDCP authorization procedure */
  128. static
  129. int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
  130. const struct intel_hdcp_shim *shim)
  131. {
  132. struct drm_i915_private *dev_priv;
  133. u32 vprime, sha_text, sha_leftovers, rep_ctl;
  134. u8 bstatus[2], num_downstream, *ksv_fifo;
  135. int ret, i, j, sha_idx;
  136. dev_priv = intel_dig_port->base.base.dev->dev_private;
  137. ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
  138. if (ret) {
  139. DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
  140. return ret;
  141. }
  142. ret = shim->read_bstatus(intel_dig_port, bstatus);
  143. if (ret)
  144. return ret;
  145. if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
  146. DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
  147. DRM_ERROR("Max Topology Limit Exceeded\n");
  148. return -EPERM;
  149. }
  150. /*
  151. * When repeater reports 0 device count, HDCP1.4 spec allows disabling
  152. * the HDCP encryption. That implies that repeater can't have its own
  153. * display. As there is no consumption of encrypted content in the
  154. * repeater with 0 downstream devices, we are failing the
  155. * authentication.
  156. */
  157. num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
  158. if (num_downstream == 0)
  159. return -EINVAL;
  160. ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
  161. if (!ksv_fifo)
  162. return -ENOMEM;
  163. ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
  164. if (ret)
  165. return ret;
  166. /* Process V' values from the receiver */
  167. for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
  168. ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
  169. if (ret)
  170. return ret;
  171. I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
  172. }
  173. /*
  174. * We need to write the concatenation of all device KSVs, BINFO (DP) ||
  175. * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
  176. * stream is written via the HDCP_SHA_TEXT register in 32-bit
  177. * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
  178. * index will keep track of our progress through the 64 bytes as well as
  179. * helping us work the 40-bit KSVs through our 32-bit register.
  180. *
  181. * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
  182. */
  183. sha_idx = 0;
  184. sha_text = 0;
  185. sha_leftovers = 0;
  186. rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
  187. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
  188. for (i = 0; i < num_downstream; i++) {
  189. unsigned int sha_empty;
  190. u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
  191. /* Fill up the empty slots in sha_text and write it out */
  192. sha_empty = sizeof(sha_text) - sha_leftovers;
  193. for (j = 0; j < sha_empty; j++)
  194. sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
  195. ret = intel_write_sha_text(dev_priv, sha_text);
  196. if (ret < 0)
  197. return ret;
  198. /* Programming guide writes this every 64 bytes */
  199. sha_idx += sizeof(sha_text);
  200. if (!(sha_idx % 64))
  201. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
  202. /* Store the leftover bytes from the ksv in sha_text */
  203. sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
  204. sha_text = 0;
  205. for (j = 0; j < sha_leftovers; j++)
  206. sha_text |= ksv[sha_empty + j] <<
  207. ((sizeof(sha_text) - j - 1) * 8);
  208. /*
  209. * If we still have room in sha_text for more data, continue.
  210. * Otherwise, write it out immediately.
  211. */
  212. if (sizeof(sha_text) > sha_leftovers)
  213. continue;
  214. ret = intel_write_sha_text(dev_priv, sha_text);
  215. if (ret < 0)
  216. return ret;
  217. sha_leftovers = 0;
  218. sha_text = 0;
  219. sha_idx += sizeof(sha_text);
  220. }
  221. /*
  222. * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
  223. * bytes are leftover from the last ksv, we might be able to fit them
  224. * all in sha_text (first 2 cases), or we might need to split them up
  225. * into 2 writes (last 2 cases).
  226. */
  227. if (sha_leftovers == 0) {
  228. /* Write 16 bits of text, 16 bits of M0 */
  229. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
  230. ret = intel_write_sha_text(dev_priv,
  231. bstatus[0] << 8 | bstatus[1]);
  232. if (ret < 0)
  233. return ret;
  234. sha_idx += sizeof(sha_text);
  235. /* Write 32 bits of M0 */
  236. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
  237. ret = intel_write_sha_text(dev_priv, 0);
  238. if (ret < 0)
  239. return ret;
  240. sha_idx += sizeof(sha_text);
  241. /* Write 16 bits of M0 */
  242. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
  243. ret = intel_write_sha_text(dev_priv, 0);
  244. if (ret < 0)
  245. return ret;
  246. sha_idx += sizeof(sha_text);
  247. } else if (sha_leftovers == 1) {
  248. /* Write 24 bits of text, 8 bits of M0 */
  249. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
  250. sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
  251. /* Only 24-bits of data, must be in the LSB */
  252. sha_text = (sha_text & 0xffffff00) >> 8;
  253. ret = intel_write_sha_text(dev_priv, sha_text);
  254. if (ret < 0)
  255. return ret;
  256. sha_idx += sizeof(sha_text);
  257. /* Write 32 bits of M0 */
  258. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
  259. ret = intel_write_sha_text(dev_priv, 0);
  260. if (ret < 0)
  261. return ret;
  262. sha_idx += sizeof(sha_text);
  263. /* Write 24 bits of M0 */
  264. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
  265. ret = intel_write_sha_text(dev_priv, 0);
  266. if (ret < 0)
  267. return ret;
  268. sha_idx += sizeof(sha_text);
  269. } else if (sha_leftovers == 2) {
  270. /* Write 32 bits of text */
  271. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
  272. sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
  273. ret = intel_write_sha_text(dev_priv, sha_text);
  274. if (ret < 0)
  275. return ret;
  276. sha_idx += sizeof(sha_text);
  277. /* Write 64 bits of M0 */
  278. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
  279. for (i = 0; i < 2; i++) {
  280. ret = intel_write_sha_text(dev_priv, 0);
  281. if (ret < 0)
  282. return ret;
  283. sha_idx += sizeof(sha_text);
  284. }
  285. } else if (sha_leftovers == 3) {
  286. /* Write 32 bits of text */
  287. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
  288. sha_text |= bstatus[0] << 24;
  289. ret = intel_write_sha_text(dev_priv, sha_text);
  290. if (ret < 0)
  291. return ret;
  292. sha_idx += sizeof(sha_text);
  293. /* Write 8 bits of text, 24 bits of M0 */
  294. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
  295. ret = intel_write_sha_text(dev_priv, bstatus[1]);
  296. if (ret < 0)
  297. return ret;
  298. sha_idx += sizeof(sha_text);
  299. /* Write 32 bits of M0 */
  300. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
  301. ret = intel_write_sha_text(dev_priv, 0);
  302. if (ret < 0)
  303. return ret;
  304. sha_idx += sizeof(sha_text);
  305. /* Write 8 bits of M0 */
  306. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
  307. ret = intel_write_sha_text(dev_priv, 0);
  308. if (ret < 0)
  309. return ret;
  310. sha_idx += sizeof(sha_text);
  311. } else {
  312. DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers);
  313. return -EINVAL;
  314. }
  315. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
  316. /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
  317. while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
  318. ret = intel_write_sha_text(dev_priv, 0);
  319. if (ret < 0)
  320. return ret;
  321. sha_idx += sizeof(sha_text);
  322. }
  323. /*
  324. * Last write gets the length of the concatenation in bits. That is:
  325. * - 5 bytes per device
  326. * - 10 bytes for BINFO/BSTATUS(2), M0(8)
  327. */
  328. sha_text = (num_downstream * 5 + 10) * 8;
  329. ret = intel_write_sha_text(dev_priv, sha_text);
  330. if (ret < 0)
  331. return ret;
  332. /* Tell the HW we're done with the hash and wait for it to ACK */
  333. I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
  334. if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
  335. HDCP_SHA1_COMPLETE,
  336. HDCP_SHA1_COMPLETE, 1)) {
  337. DRM_ERROR("Timed out waiting for SHA1 complete\n");
  338. return -ETIMEDOUT;
  339. }
  340. if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
  341. DRM_ERROR("SHA-1 mismatch, HDCP failed\n");
  342. return -ENXIO;
  343. }
  344. DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
  345. num_downstream);
  346. return 0;
  347. }
/*
 * Implements Part 1 of the HDCP authorization procedure: generate and
 * exchange An/Aksv, read and validate Bksv, start authentication and
 * encryption, and verify R0/R0'. If the sink is a repeater, hands off to
 * intel_hdcp_auth_downstream() for Part 2.
 *
 * Returns 0 when encryption is up (and, for repeaters, downstream auth
 * succeeded), a negative error code otherwise.
 */
static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
			   const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv;
	enum port port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/*
	 * The unions below view the same bytes either as the 32-bit
	 * register halves the hardware exposes or as the byte arrays the
	 * shim read/write callbacks expect.
	 */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	port = intel_dig_port->base.port;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			DRM_ERROR("Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_AN_READY,
				    HDCP_STATUS_AN_READY, 1)) {
		DRM_ERROR("Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read back the hardware-captured An and hand it to the sink. */
	an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
	an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
	ret = shim->write_an_aksv(intel_dig_port, an.shim);
	if (ret)
		return ret;

	/* The R0' wait deadline below is measured from the Aksv write. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	/* HDCP spec states that we must retry the bksv if it is invalid */
	for (i = 0; i < tries; i++) {
		ret = shim->read_bksv(intel_dig_port, bksv.shim);
		if (ret)
			return ret;
		if (intel_hdcp_is_ksv_valid(bksv.shim))
			break;
	}
	if (i == tries) {
		DRM_ERROR("HDCP failed, Bksv is invalid\n");
		return -ENODEV;
	}

	I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
	I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);

	ret = shim->repeater_present(intel_dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		I915_WRITE(HDCP_REP_CTL,
			   intel_hdcp_get_repeater_ctl(intel_dig_port));

	ret = shim->toggle_signalling(intel_dig_port, true);
	if (ret)
		return ret;

	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	ri.reg = 0;
	ret = shim->read_ri_prime(intel_dig_port, ri.shim);
	if (ret)
		return ret;
	I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);

	/* Wait for Ri prime match */
	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		     (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
			  I915_READ(PORT_HDCP_STATUS(port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
		DRM_ERROR("Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(intel_dig_port, shim);

	DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
	return 0;
}
  471. static
  472. struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
  473. {
  474. return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
  475. }
  476. static int _intel_hdcp_disable(struct intel_connector *connector)
  477. {
  478. struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
  479. struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
  480. enum port port = intel_dig_port->base.port;
  481. int ret;
  482. DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
  483. connector->base.name, connector->base.base.id);
  484. I915_WRITE(PORT_HDCP_CONF(port), 0);
  485. if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
  486. 20)) {
  487. DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
  488. return -ETIMEDOUT;
  489. }
  490. ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
  491. if (ret) {
  492. DRM_ERROR("Failed to disable HDCP signalling\n");
  493. return ret;
  494. }
  495. DRM_DEBUG_KMS("HDCP is disabled\n");
  496. return 0;
  497. }
  498. static int _intel_hdcp_enable(struct intel_connector *connector)
  499. {
  500. struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
  501. int i, ret, tries = 3;
  502. DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
  503. connector->base.name, connector->base.base.id);
  504. if (!(I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(1))) {
  505. DRM_ERROR("PG1 is disabled, cannot load keys\n");
  506. return -ENXIO;
  507. }
  508. for (i = 0; i < KEY_LOAD_TRIES; i++) {
  509. ret = intel_hdcp_load_keys(dev_priv);
  510. if (!ret)
  511. break;
  512. intel_hdcp_clear_keys(dev_priv);
  513. }
  514. if (ret) {
  515. DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
  516. return ret;
  517. }
  518. /* Incase of authentication failures, HDCP spec expects reauth. */
  519. for (i = 0; i < tries; i++) {
  520. ret = intel_hdcp_auth(conn_to_dig_port(connector),
  521. connector->hdcp_shim);
  522. if (!ret)
  523. return 0;
  524. DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
  525. /* Ensuring HDCP encryption and signalling are stopped. */
  526. _intel_hdcp_disable(connector);
  527. }
  528. DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
  529. return ret;
  530. }
  531. static void intel_hdcp_check_work(struct work_struct *work)
  532. {
  533. struct intel_connector *connector = container_of(to_delayed_work(work),
  534. struct intel_connector,
  535. hdcp_check_work);
  536. if (!intel_hdcp_check_link(connector))
  537. schedule_delayed_work(&connector->hdcp_check_work,
  538. DRM_HDCP_CHECK_PERIOD_MS);
  539. }
/*
 * Worker that mirrors the driver's hdcp_value into the connector's
 * content_protection property state. Runs from a workqueue because the
 * property update requires connection_mutex, which the contexts that
 * flip hdcp_value cannot take. Lock order: connection_mutex, then
 * hdcp_mutex.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_connector *connector = container_of(work,
							 struct intel_connector,
							 hdcp_prop_work);
	struct drm_device *dev = connector->base.dev;
	struct drm_connector_state *state;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&connector->hdcp_mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state = connector->base.state;
		state->content_protection = connector->hdcp_value;
	}

	mutex_unlock(&connector->hdcp_mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
  561. bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
  562. {
  563. /* PORT E doesn't have HDCP, and PORT F is disabled */
  564. return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
  565. !IS_CHERRYVIEW(dev_priv) && port < PORT_E);
  566. }
/*
 * One-time per-connector HDCP setup: attach the content-protection
 * property and initialize the lock, shim pointer, and the link-check
 * and property workers.
 *
 * Returns 0 on success or the property-attach error.
 */
int intel_hdcp_init(struct intel_connector *connector,
		    const struct intel_hdcp_shim *hdcp_shim)
{
	int ret;

	ret = drm_connector_attach_content_protection_property(
			&connector->base);
	if (ret)
		return ret;

	connector->hdcp_shim = hdcp_shim;
	mutex_init(&connector->hdcp_mutex);
	INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
	INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
	return 0;
}
  581. int intel_hdcp_enable(struct intel_connector *connector)
  582. {
  583. int ret;
  584. if (!connector->hdcp_shim)
  585. return -ENOENT;
  586. mutex_lock(&connector->hdcp_mutex);
  587. ret = _intel_hdcp_enable(connector);
  588. if (ret)
  589. goto out;
  590. connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
  591. schedule_work(&connector->hdcp_prop_work);
  592. schedule_delayed_work(&connector->hdcp_check_work,
  593. DRM_HDCP_CHECK_PERIOD_MS);
  594. out:
  595. mutex_unlock(&connector->hdcp_mutex);
  596. return ret;
  597. }
  598. int intel_hdcp_disable(struct intel_connector *connector)
  599. {
  600. int ret = 0;
  601. if (!connector->hdcp_shim)
  602. return -ENOENT;
  603. mutex_lock(&connector->hdcp_mutex);
  604. if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
  605. connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
  606. ret = _intel_hdcp_disable(connector);
  607. }
  608. mutex_unlock(&connector->hdcp_mutex);
  609. cancel_delayed_work_sync(&connector->hdcp_check_work);
  610. return ret;
  611. }
  612. void intel_hdcp_atomic_check(struct drm_connector *connector,
  613. struct drm_connector_state *old_state,
  614. struct drm_connector_state *new_state)
  615. {
  616. uint64_t old_cp = old_state->content_protection;
  617. uint64_t new_cp = new_state->content_protection;
  618. struct drm_crtc_state *crtc_state;
  619. if (!new_state->crtc) {
  620. /*
  621. * If the connector is being disabled with CP enabled, mark it
  622. * desired so it's re-enabled when the connector is brought back
  623. */
  624. if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
  625. new_state->content_protection =
  626. DRM_MODE_CONTENT_PROTECTION_DESIRED;
  627. return;
  628. }
  629. /*
  630. * Nothing to do if the state didn't change, or HDCP was activated since
  631. * the last commit
  632. */
  633. if (old_cp == new_cp ||
  634. (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
  635. new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
  636. return;
  637. crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
  638. new_state->crtc);
  639. crtc_state->mode_changed = true;
  640. }
/*
 * Implements Part 3 of the HDCP authorization procedure: verify the link
 * is still encrypted and the shim's link check passes; on failure, tear
 * HDCP down and retry authentication. Flips hdcp_value (and schedules
 * the property worker) between ENABLED and DESIRED accordingly.
 *
 * Returns 0 if the link is fine (or HDCP is undesired), a negative
 * error code if the link is broken and could not be re-authenticated.
 */
int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret = 0;

	if (!connector->hdcp_shim)
		return -ENOENT;

	mutex_lock(&connector->hdcp_mutex);

	if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	/* Hardware says encryption dropped: report and downgrade to DESIRED. */
	if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
		DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
			  connector->base.name, connector->base.base.id,
			  I915_READ(PORT_HDCP_STATUS(port)));
		ret = -ENXIO;
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

	/* Shim reports the link healthy: (re)assert ENABLED and we're done. */
	if (connector->hdcp_shim->check_link(intel_dig_port)) {
		/*
		 * NOTE(review): this inner test looks always-true here, since
		 * UNDESIRED already bailed out above — confirm whether it
		 * guards against a concurrent transition.
		 */
		if (connector->hdcp_value !=
		    DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			connector->hdcp_value =
				DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&connector->hdcp_prop_work);
		}
		goto out;
	}

	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
		      connector->base.name, connector->base.base.id);

	/* Link check failed: full teardown, then re-authenticate. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

out:
	mutex_unlock(&connector->hdcp_mutex);
	return ret;
}