/* ctrl.c */
/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
  20. static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
  21. {
  22. u32 *jump_cmd, op_flags;
  23. init_job_desc(desc, 0);
  24. op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
  25. (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
  26. /* INIT RNG in non-test mode */
  27. append_operation(desc, op_flags);
  28. if (!handle && do_sk) {
  29. /*
  30. * For SH0, Secure Keys must be generated as well
  31. */
  32. /* wait for done */
  33. jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
  34. set_jump_tgt_here(desc, jump_cmd);
  35. /*
  36. * load 1 to clear written reg:
  37. * resets the done interrrupt and returns the RNG to idle.
  38. */
  39. append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
  40. /* Initialize State Handle */
  41. append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
  42. OP_ALG_AAI_RNG4_SK);
  43. }
  44. append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
  45. }
/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
  47. static void build_deinstantiation_desc(u32 *desc, int handle)
  48. {
  49. init_job_desc(desc, 0);
  50. /* Uninstantiate State Handle 0 */
  51. append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
  52. (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
  53. append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
  54. }
/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *			  the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @desc - descriptor to run
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *	   - -ENODEV if the DECO couldn't be acquired
 *	   - -EAGAIN if an error occurred while executing the descriptor
 */
  65. static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
  66. u32 *status)
  67. {
  68. struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
  69. struct caam_full __iomem *topregs;
  70. unsigned int timeout = 100000;
  71. u32 deco_dbg_reg, flags;
  72. int i;
  73. /* Set the bit to request direct access to DECO0 */
  74. topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
  75. if (ctrlpriv->virt_en == 1) {
  76. setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
  77. while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
  78. --timeout)
  79. cpu_relax();
  80. timeout = 100000;
  81. }
  82. setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
  83. while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
  84. --timeout)
  85. cpu_relax();
  86. if (!timeout) {
  87. dev_err(ctrldev, "failed to acquire DECO 0\n");
  88. clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
  89. return -ENODEV;
  90. }
  91. for (i = 0; i < desc_len(desc); i++)
  92. wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
  93. flags = DECO_JQCR_WHL;
  94. /*
  95. * If the descriptor length is longer than 4 words, then the
  96. * FOUR bit in JRCTRL register must be set.
  97. */
  98. if (desc_len(desc) >= 4)
  99. flags |= DECO_JQCR_FOUR;
  100. /* Instruct the DECO to execute it */
  101. wr_reg32(&topregs->deco.jr_ctl_hi, flags);
  102. timeout = 10000000;
  103. do {
  104. deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
  105. /*
  106. * If an error occured in the descriptor, then
  107. * the DECO status field will be set to 0x0D
  108. */
  109. if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
  110. DESC_DBG_DECO_STAT_HOST_ERR)
  111. break;
  112. cpu_relax();
  113. } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
  114. *status = rd_reg32(&topregs->deco.op_status_hi) &
  115. DECO_OP_STATUS_HI_ERR_MASK;
  116. if (ctrlpriv->virt_en == 1)
  117. clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
  118. /* Mark the DECO as free */
  119. clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
  120. if (!timeout)
  121. return -EAGAIN;
  122. return 0;
  123. }
/*
 * instantiate_rng - builds and executes a descriptor on DECO0,
 *		     which initializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *			for the RNG4 state handles which exist in
 *			the RNG4 block: 1 if it's been instantiated
 *			by an external entry, 0 otherwise.
 * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
 *	     Caution: this can be done only once; if the keys need to be
 *	     regenerated, a POR is required
 *
 * Return: - 0 if no error occurred
 *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
 *	   - -ENODEV if DECO0 couldn't be acquired
 *	   - -EAGAIN if an error occurred when executing the descriptor
 *	      f.i. there was a RNG hardware error due to not "good enough"
 *	      entropy being acquired.
 */
  143. static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
  144. int gen_sk)
  145. {
  146. struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
  147. struct caam_full __iomem *topregs;
  148. struct rng4tst __iomem *r4tst;
  149. u32 *desc, status, rdsta_val;
  150. int ret = 0, sh_idx;
  151. topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
  152. r4tst = &topregs->ctrl.r4tst[0];
  153. desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
  154. if (!desc)
  155. return -ENOMEM;
  156. for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
  157. /*
  158. * If the corresponding bit is set, this state handle
  159. * was initialized by somebody else, so it's left alone.
  160. */
  161. if ((1 << sh_idx) & state_handle_mask)
  162. continue;
  163. /* Create the descriptor for instantiating RNG State Handle */
  164. build_instantiation_desc(desc, sh_idx, gen_sk);
  165. /* Try to run it through DECO0 */
  166. ret = run_descriptor_deco0(ctrldev, desc, &status);
  167. /*
  168. * If ret is not 0, or descriptor status is not 0, then
  169. * something went wrong. No need to try the next state
  170. * handle (if available), bail out here.
  171. * Also, if for some reason, the State Handle didn't get
  172. * instantiated although the descriptor has finished
  173. * without any error (HW optimizations for later
  174. * CAAM eras), then try again.
  175. */
  176. rdsta_val =
  177. rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
  178. if (status || !(rdsta_val & (1 << sh_idx)))
  179. ret = -EAGAIN;
  180. if (ret)
  181. break;
  182. dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
  183. /* Clear the contents before recreating the descriptor */
  184. memset(desc, 0x00, CAAM_CMD_SZ * 7);
  185. }
  186. kfree(desc);
  187. return ret;
  188. }
/*
 * deinstantiate_rng - builds and executes a descriptor on DECO0,
 *		       which deinitializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *			for the RNG4 state handles which exist in
 *			the RNG4 block: 1 if it's been instantiated
 *
 * Return: - 0 if no error occurred
 *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
 *	   - -ENODEV if DECO0 couldn't be acquired
 *	   - -EAGAIN if an error occurred when executing the descriptor
 */
  202. static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
  203. {
  204. u32 *desc, status;
  205. int sh_idx, ret = 0;
  206. desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
  207. if (!desc)
  208. return -ENOMEM;
  209. for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
  210. /*
  211. * If the corresponding bit is set, then it means the state
  212. * handle was initialized by us, and thus it needs to be
  213. * deintialized as well
  214. */
  215. if ((1 << sh_idx) & state_handle_mask) {
  216. /*
  217. * Create the descriptor for deinstantating this state
  218. * handle
  219. */
  220. build_deinstantiation_desc(desc, sh_idx);
  221. /* Try to run it through DECO0 */
  222. ret = run_descriptor_deco0(ctrldev, desc, &status);
  223. if (ret || status) {
  224. dev_err(ctrldev,
  225. "Failed to deinstantiate RNG4 SH%d\n",
  226. sh_idx);
  227. break;
  228. }
  229. dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
  230. }
  231. }
  232. kfree(desc);
  233. return ret;
  234. }
  235. static int caam_remove(struct platform_device *pdev)
  236. {
  237. struct device *ctrldev;
  238. struct caam_drv_private *ctrlpriv;
  239. struct caam_full __iomem *topregs;
  240. int ring, ret = 0;
  241. ctrldev = &pdev->dev;
  242. ctrlpriv = dev_get_drvdata(ctrldev);
  243. topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
  244. /* Remove platform devices for JobRs */
  245. for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
  246. if (ctrlpriv->jrpdev[ring])
  247. of_device_unregister(ctrlpriv->jrpdev[ring]);
  248. }
  249. /* De-initialize RNG state handles initialized by this driver. */
  250. if (ctrlpriv->rng4_sh_init)
  251. deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
  252. /* Shut down debug views */
  253. #ifdef CONFIG_DEBUG_FS
  254. debugfs_remove_recursive(ctrlpriv->dfs_root);
  255. #endif
  256. /* Unmap controller region */
  257. iounmap(&topregs->ctrl);
  258. return ret;
  259. }
/*
 * kick_trng - sets the various parameters for enabling the initialization
 *	       of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
  266. static void kick_trng(struct platform_device *pdev, int ent_delay)
  267. {
  268. struct device *ctrldev = &pdev->dev;
  269. struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
  270. struct caam_full __iomem *topregs;
  271. struct rng4tst __iomem *r4tst;
  272. u32 val;
  273. topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
  274. r4tst = &topregs->ctrl.r4tst[0];
  275. /* put RNG4 into program mode */
  276. setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
  277. /*
  278. * Performance-wise, it does not make sense to
  279. * set the delay to a value that is lower
  280. * than the last one that worked (i.e. the state handles
  281. * were instantiated properly. Thus, instead of wasting
  282. * time trying to set the values controlling the sample
  283. * frequency, the function simply returns.
  284. */
  285. val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
  286. >> RTSDCTL_ENT_DLY_SHIFT;
  287. if (ent_delay <= val) {
  288. /* put RNG4 into run mode */
  289. clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
  290. return;
  291. }
  292. val = rd_reg32(&r4tst->rtsdctl);
  293. val = (val & ~RTSDCTL_ENT_DLY_MASK) |
  294. (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
  295. wr_reg32(&r4tst->rtsdctl, val);
  296. /* min. freq. count, equal to 1/4 of the entropy sample length */
  297. wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
  298. /* max. freq. count, equal to 8 times the entropy sample length */
  299. wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
  300. /* put RNG4 into run mode */
  301. clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
  302. }
/**
 * caam_get_era() - Return the ERA of the SEC on SoC, based
 * on "sec-era" property in the DTS. This property is updated by u-boot.
 **/
  307. int caam_get_era(void)
  308. {
  309. struct device_node *caam_node;
  310. for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
  311. const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
  312. "fsl,sec-era",
  313. NULL);
  314. return prop ? *prop : -ENOTSUPP;
  315. }
  316. return -ENOTSUPP;
  317. }
  318. EXPORT_SYMBOL(caam_get_era);
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
	int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
	u64 caam_id;
	struct device *dev;
	struct device_node *nprop, *np;
	struct caam_ctrl __iomem *ctrl;
	struct caam_full __iomem *topregs;
	struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
	struct caam_perfmon *perfmon;
#endif
	u32 scfgr, comp_params;
	u32 cha_vid_ls;

	/* Managed allocation: freed automatically when the device detaches */
	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
				GFP_KERNEL);
	if (!ctrlpriv)
		return -ENOMEM;

	dev = &pdev->dev;
	dev_set_drvdata(dev, ctrlpriv);
	ctrlpriv->pdev = pdev;
	nprop = pdev->dev.of_node;

	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = of_iomap(nprop, 0);
	if (ctrl == NULL) {
		dev_err(dev, "caam: of_iomap() failed\n");
		return -ENOMEM;
	}
	ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;

	/* topregs used to derive pointers to CAAM sub-blocks only */
	topregs = (struct caam_full __iomem *)ctrl;

	/* Get the IRQ of the controller (for security violations only) */
	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);

	/*
	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
	 * long pointers in master configuration register
	 */
	setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));

	/*
	 * Read the Compile Time parameters and SCFGR to determine
	 * if Virtualization is enabled for this platform
	 */
	comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
	scfgr = rd_reg32(&topregs->ctrl.scfgr);

	ctrlpriv->virt_en = 0;
	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
		 */
		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
		     (scfgr & SCFGR_VIRT_EN)))
			ctrlpriv->virt_en = 1;
	} else {
		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
		if (comp_params & CTPR_MS_VIRT_EN_POR)
			ctrlpriv->virt_en = 1;
	}

	/* With virtualization on, all four job-ring start bits are ours */
	if (ctrlpriv->virt_en == 1)
		setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
			  JRSTART_JR1_START | JRSTART_JR2_START |
			  JRSTART_JR3_START);

	/*
	 * Select DMA mask: 40-bit for SEC v5.0, 36-bit for other 64-bit
	 * platforms, 32-bit otherwise.
	 * NOTE(review): the first "else" binds to the inner "if" (the
	 * v5.0 check), which matches the intent; braces would make this
	 * dangling-else explicit.
	 */
	if (sizeof(dma_addr_t) == sizeof(u64))
		if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
			dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
		else
			dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
	else
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	/*
	 * Detect and enable JobRs
	 * First, find out how many ring spec'ed, allocate references
	 * for all, then go probe each one.
	 */
	rspec = 0;
	for_each_available_child_of_node(nprop, np)
		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
		    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
			rspec++;

	ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
					sizeof(struct platform_device *) * rspec,
					GFP_KERNEL);
	if (ctrlpriv->jrpdev == NULL) {
		iounmap(&topregs->ctrl);
		return -ENOMEM;
	}

	ring = 0;
	ctrlpriv->total_jobrs = 0;
	for_each_available_child_of_node(nprop, np)
		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
			ctrlpriv->jrpdev[ring] =
				of_platform_device_create(np, NULL, dev);
			if (!ctrlpriv->jrpdev[ring]) {
				pr_warn("JR%d Platform device creation error\n",
					ring);
				continue;
			}
			ctrlpriv->total_jobrs++;
			ring++;
		}

	/* Check to see if QI present. If so, enable */
	ctrlpriv->qi_present =
			!!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
			   CTPR_MS_QI_MASK);
	if (ctrlpriv->qi_present) {
		ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
		/* This is all that's required to physically enable QI */
		wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
	}

	/* If no QI and no rings specified, quit and go home */
	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
		dev_err(dev, "no queues configured, terminating\n");
		caam_remove(pdev);
		return -ENOMEM;
	}

	cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);

	/*
	 * If SEC has RNG version >= 4 and RNG state handle has not been
	 * already instantiated, do RNG instantiation
	 */
	if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
		ctrlpriv->rng4_sh_init =
			rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
		/*
		 * If the secure keys (TDKEK, JDKEK, TDSK), were already
		 * generated, signal this to the function that is instantiating
		 * the state handles. An error would occur if RNG4 attempts
		 * to regenerate these keys before the next POR.
		 */
		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
		ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
		do {
			int inst_handles =
				rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
						RDSTA_IFMASK;
			/*
			 * If either SH were instantiated by somebody else
			 * (e.g. u-boot) then it is assumed that the entropy
			 * parameters are properly set and thus the function
			 * setting these (kick_trng(...)) is skipped.
			 * Also, if a handle was instantiated, do not change
			 * the TRNG parameters.
			 */
			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
				kick_trng(pdev, ent_delay);
				ent_delay += 400;
			}
			/*
			 * if instantiate_rng(...) fails, the loop will rerun
			 * and the kick_trng(...) function will modify the
			 * upper and lower limits of the entropy sampling
			 * interval, leading to a successful initialization of
			 * the RNG.
			 */
			ret = instantiate_rng(dev, inst_handles,
					      gen_sk);
		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
		if (ret) {
			dev_err(dev, "failed to instantiate RNG");
			caam_remove(pdev);
			return ret;
		}
		/*
		 * Set handles init'ed by this module as the complement of the
		 * already initialized ones
		 */
		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;

		/* Enable RDB bit so that RNG works faster */
		setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
	}

	/* NOTE: RTIC detection ought to go here, around Si time */
	caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
		  (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);

	/* Report "alive" for developer to see */
	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
		 caam_get_era());
	dev_info(dev, "job rings = %d, qi = %d\n",
		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);

#ifdef CONFIG_DEBUG_FS
	/*
	 * FIXME: needs better naming distinction, as some amalgamation of
	 * "caam" and nprop->full_name. The OF name isn't distinctive,
	 * but does separate instances
	 */
	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);

	/* Controller-level - performance monitor counters */
	ctrlpriv->ctl_rq_dequeued =
		debugfs_create_u64("rq_dequeued",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->req_dequeued);
	ctrlpriv->ctl_ob_enc_req =
		debugfs_create_u64("ob_rq_encrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ob_enc_req);
	ctrlpriv->ctl_ib_dec_req =
		debugfs_create_u64("ib_rq_decrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ib_dec_req);
	ctrlpriv->ctl_ob_enc_bytes =
		debugfs_create_u64("ob_bytes_encrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ob_enc_bytes);
	ctrlpriv->ctl_ob_prot_bytes =
		debugfs_create_u64("ob_bytes_protected",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ob_prot_bytes);
	ctrlpriv->ctl_ib_dec_bytes =
		debugfs_create_u64("ib_bytes_decrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ib_dec_bytes);
	ctrlpriv->ctl_ib_valid_bytes =
		debugfs_create_u64("ib_bytes_validated",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ib_valid_bytes);

	/* Controller level - global status values */
	ctrlpriv->ctl_faultaddr =
		debugfs_create_u64("fault_addr",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->faultaddr);
	ctrlpriv->ctl_faultdetail =
		debugfs_create_u32("fault_detail",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->faultdetail);
	ctrlpriv->ctl_faultstatus =
		debugfs_create_u32("fault_status",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->status);

	/* Internal covering keys (useful in non-secure mode only) */
	ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
						S_IRUSR |
						S_IRGRP | S_IROTH,
						ctrlpriv->ctl,
						&ctrlpriv->ctl_kek_wrap);

	ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
						 S_IRUSR |
						 S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tkek_wrap);

	ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
						 S_IRUSR |
						 S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tdsk_wrap);
#endif
	return 0;
}
/* Device-tree compatible strings this driver binds against */
static struct of_device_id caam_match[] = {
	{
		.compatible = "fsl,sec-v4.0",
	},
	{
		.compatible = "fsl,sec4.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_match);

/* Platform driver glue: controller-level probe/remove entry points */
static struct platform_driver caam_driver = {
	.driver = {
		.name = "caam",
		.owner = THIS_MODULE,
		.of_match_table = caam_match,
	},
	.probe       = caam_probe,
	.remove      = caam_remove,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");