  1. /*
  2. * CAAM control-plane driver backend
  3. * Controller-level driver, kernel property detection, initialization
  4. *
  5. * Copyright 2008-2012 Freescale Semiconductor, Inc.
  6. */
  7. #include <linux/of_address.h>
  8. #include <linux/of_irq.h>
  9. #include "compat.h"
  10. #include "regs.h"
  11. #include "intern.h"
  12. #include "jr.h"
  13. #include "desc_constr.h"
  14. #include "error.h"
  15. #include "ctrl.h"
  16. /*
  17. * Descriptor to instantiate RNG State Handle 0 in normal mode and
  18. * load the JDKEK, TDKEK and TDSK registers
  19. */
  20. static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
  21. {
  22. u32 *jump_cmd, op_flags;
  23. init_job_desc(desc, 0);
  24. op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
  25. (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
  26. /* INIT RNG in non-test mode */
  27. append_operation(desc, op_flags);
  28. if (!handle && do_sk) {
  29. /*
  30. * For SH0, Secure Keys must be generated as well
  31. */
  32. /* wait for done */
  33. jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
  34. set_jump_tgt_here(desc, jump_cmd);
  35. /*
  36. * load 1 to clear written reg:
  37. * resets the done interrrupt and returns the RNG to idle.
  38. */
  39. append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
  40. /* Initialize State Handle */
  41. append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
  42. OP_ALG_AAI_RNG4_SK);
  43. }
  44. append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
  45. }
  46. /* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
  47. static void build_deinstantiation_desc(u32 *desc, int handle)
  48. {
  49. init_job_desc(desc, 0);
  50. /* Uninstantiate State Handle 0 */
  51. append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
  52. (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
  53. append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
  54. }
  55. /*
  56. * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
  57. * the software (no JR/QI used).
  58. * @ctrldev - pointer to device
  59. * @status - descriptor status, after being run
  60. *
  61. * Return: - 0 if no error occurred
  62. * - -ENODEV if the DECO couldn't be acquired
  63. * - -EAGAIN if an error occurred while executing the descriptor
  64. */
  65. static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
  66. u32 *status)
  67. {
  68. struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
  69. struct caam_full __iomem *topregs;
  70. unsigned int timeout = 100000;
  71. u32 deco_dbg_reg, flags;
  72. int i;
  73. /* Set the bit to request direct access to DECO0 */
  74. topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
  75. setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
  76. while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
  77. --timeout)
  78. cpu_relax();
  79. if (!timeout) {
  80. dev_err(ctrldev, "failed to acquire DECO 0\n");
  81. clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
  82. return -ENODEV;
  83. }
  84. for (i = 0; i < desc_len(desc); i++)
  85. wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
  86. flags = DECO_JQCR_WHL;
  87. /*
  88. * If the descriptor length is longer than 4 words, then the
  89. * FOUR bit in JRCTRL register must be set.
  90. */
  91. if (desc_len(desc) >= 4)
  92. flags |= DECO_JQCR_FOUR;
  93. /* Instruct the DECO to execute it */
  94. wr_reg32(&topregs->deco.jr_ctl_hi, flags);
  95. timeout = 10000000;
  96. do {
  97. deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
  98. /*
  99. * If an error occured in the descriptor, then
  100. * the DECO status field will be set to 0x0D
  101. */
  102. if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
  103. DESC_DBG_DECO_STAT_HOST_ERR)
  104. break;
  105. cpu_relax();
  106. } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
  107. *status = rd_reg32(&topregs->deco.op_status_hi) &
  108. DECO_OP_STATUS_HI_ERR_MASK;
  109. /* Mark the DECO as free */
  110. clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
  111. if (!timeout)
  112. return -EAGAIN;
  113. return 0;
  114. }
  115. /*
  116. * instantiate_rng - builds and executes a descriptor on DECO0,
  117. * which initializes the RNG block.
  118. * @ctrldev - pointer to device
  119. * @state_handle_mask - bitmask containing the instantiation status
  120. * for the RNG4 state handles which exist in
  121. * the RNG4 block: 1 if it's been instantiated
  122. * by an external entry, 0 otherwise.
  123. * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
  124. * Caution: this can be done only once; if the keys need to be
  125. * regenerated, a POR is required
  126. *
  127. * Return: - 0 if no error occurred
  128. * - -ENOMEM if there isn't enough memory to allocate the descriptor
  129. * - -ENODEV if DECO0 couldn't be acquired
  130. * - -EAGAIN if an error occurred when executing the descriptor
  131. * f.i. there was a RNG hardware error due to not "good enough"
  132. * entropy being aquired.
  133. */
  134. static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
  135. int gen_sk)
  136. {
  137. struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
  138. struct caam_full __iomem *topregs;
  139. struct rng4tst __iomem *r4tst;
  140. u32 *desc, status, rdsta_val;
  141. int ret = 0, sh_idx;
  142. topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
  143. r4tst = &topregs->ctrl.r4tst[0];
  144. desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
  145. if (!desc)
  146. return -ENOMEM;
  147. for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
  148. /*
  149. * If the corresponding bit is set, this state handle
  150. * was initialized by somebody else, so it's left alone.
  151. */
  152. if ((1 << sh_idx) & state_handle_mask)
  153. continue;
  154. /* Create the descriptor for instantiating RNG State Handle */
  155. build_instantiation_desc(desc, sh_idx, gen_sk);
  156. /* Try to run it through DECO0 */
  157. ret = run_descriptor_deco0(ctrldev, desc, &status);
  158. /*
  159. * If ret is not 0, or descriptor status is not 0, then
  160. * something went wrong. No need to try the next state
  161. * handle (if available), bail out here.
  162. * Also, if for some reason, the State Handle didn't get
  163. * instantiated although the descriptor has finished
  164. * without any error (HW optimizations for later
  165. * CAAM eras), then try again.
  166. */
  167. rdsta_val =
  168. rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
  169. if (status || !(rdsta_val & (1 << sh_idx)))
  170. ret = -EAGAIN;
  171. if (ret)
  172. break;
  173. dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
  174. /* Clear the contents before recreating the descriptor */
  175. memset(desc, 0x00, CAAM_CMD_SZ * 7);
  176. }
  177. kfree(desc);
  178. return ret;
  179. }
  180. /*
  181. * deinstantiate_rng - builds and executes a descriptor on DECO0,
  182. * which deinitializes the RNG block.
  183. * @ctrldev - pointer to device
  184. * @state_handle_mask - bitmask containing the instantiation status
  185. * for the RNG4 state handles which exist in
  186. * the RNG4 block: 1 if it's been instantiated
  187. *
  188. * Return: - 0 if no error occurred
  189. * - -ENOMEM if there isn't enough memory to allocate the descriptor
  190. * - -ENODEV if DECO0 couldn't be acquired
  191. * - -EAGAIN if an error occurred when executing the descriptor
  192. */
  193. static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
  194. {
  195. u32 *desc, status;
  196. int sh_idx, ret = 0;
  197. desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
  198. if (!desc)
  199. return -ENOMEM;
  200. for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
  201. /*
  202. * If the corresponding bit is set, then it means the state
  203. * handle was initialized by us, and thus it needs to be
  204. * deintialized as well
  205. */
  206. if ((1 << sh_idx) & state_handle_mask) {
  207. /*
  208. * Create the descriptor for deinstantating this state
  209. * handle
  210. */
  211. build_deinstantiation_desc(desc, sh_idx);
  212. /* Try to run it through DECO0 */
  213. ret = run_descriptor_deco0(ctrldev, desc, &status);
  214. if (ret || status) {
  215. dev_err(ctrldev,
  216. "Failed to deinstantiate RNG4 SH%d\n",
  217. sh_idx);
  218. break;
  219. }
  220. dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
  221. }
  222. }
  223. kfree(desc);
  224. return ret;
  225. }
  226. static int caam_remove(struct platform_device *pdev)
  227. {
  228. struct device *ctrldev;
  229. struct caam_drv_private *ctrlpriv;
  230. struct caam_full __iomem *topregs;
  231. int ring, ret = 0;
  232. ctrldev = &pdev->dev;
  233. ctrlpriv = dev_get_drvdata(ctrldev);
  234. topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
  235. /* Remove platform devices for JobRs */
  236. for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
  237. if (ctrlpriv->jrpdev[ring])
  238. of_device_unregister(ctrlpriv->jrpdev[ring]);
  239. }
  240. /* De-initialize RNG state handles initialized by this driver. */
  241. if (ctrlpriv->rng4_sh_init)
  242. deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
  243. /* Shut down debug views */
  244. #ifdef CONFIG_DEBUG_FS
  245. debugfs_remove_recursive(ctrlpriv->dfs_root);
  246. #endif
  247. /* Unmap controller region */
  248. iounmap(&topregs->ctrl);
  249. kfree(ctrlpriv->jrpdev);
  250. kfree(ctrlpriv);
  251. return ret;
  252. }
  253. /*
  254. * kick_trng - sets the various parameters for enabling the initialization
  255. * of the RNG4 block in CAAM
  256. * @pdev - pointer to the platform device
  257. * @ent_delay - Defines the length (in system clocks) of each entropy sample.
  258. */
  259. static void kick_trng(struct platform_device *pdev, int ent_delay)
  260. {
  261. struct device *ctrldev = &pdev->dev;
  262. struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
  263. struct caam_full __iomem *topregs;
  264. struct rng4tst __iomem *r4tst;
  265. u32 val;
  266. topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
  267. r4tst = &topregs->ctrl.r4tst[0];
  268. /* put RNG4 into program mode */
  269. setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
  270. /*
  271. * Performance-wise, it does not make sense to
  272. * set the delay to a value that is lower
  273. * than the last one that worked (i.e. the state handles
  274. * were instantiated properly. Thus, instead of wasting
  275. * time trying to set the values controlling the sample
  276. * frequency, the function simply returns.
  277. */
  278. val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
  279. >> RTSDCTL_ENT_DLY_SHIFT;
  280. if (ent_delay <= val) {
  281. /* put RNG4 into run mode */
  282. clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
  283. return;
  284. }
  285. val = rd_reg32(&r4tst->rtsdctl);
  286. val = (val & ~RTSDCTL_ENT_DLY_MASK) |
  287. (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
  288. wr_reg32(&r4tst->rtsdctl, val);
  289. /* min. freq. count, equal to 1/4 of the entropy sample length */
  290. wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
  291. /* max. freq. count, equal to 8 times the entropy sample length */
  292. wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
  293. /* put RNG4 into run mode */
  294. clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
  295. }
  296. /**
  297. * caam_get_era() - Return the ERA of the SEC on SoC, based
  298. * on the SEC_VID register.
  299. * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
  300. * @caam_id - the value of the SEC_VID register
  301. **/
  302. int caam_get_era(u64 caam_id)
  303. {
  304. struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
  305. static const struct {
  306. u16 ip_id;
  307. u8 maj_rev;
  308. u8 era;
  309. } caam_eras[] = {
  310. {0x0A10, 1, 1},
  311. {0x0A10, 2, 2},
  312. {0x0A12, 1, 3},
  313. {0x0A14, 1, 3},
  314. {0x0A14, 2, 4},
  315. {0x0A16, 1, 4},
  316. {0x0A11, 1, 4}
  317. };
  318. int i;
  319. for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
  320. if (caam_eras[i].ip_id == sec_vid->ip_id &&
  321. caam_eras[i].maj_rev == sec_vid->maj_rev)
  322. return caam_eras[i].era;
  323. return -ENOTSUPP;
  324. }
  325. EXPORT_SYMBOL(caam_get_era);
  326. /* Probe routine for CAAM top (controller) level */
  327. static int caam_probe(struct platform_device *pdev)
  328. {
  329. int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
  330. u64 caam_id;
  331. struct device *dev;
  332. struct device_node *nprop, *np;
  333. struct caam_ctrl __iomem *ctrl;
  334. struct caam_full __iomem *topregs;
  335. struct caam_drv_private *ctrlpriv;
  336. #ifdef CONFIG_DEBUG_FS
  337. struct caam_perfmon *perfmon;
  338. #endif
  339. u64 cha_vid;
  340. ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
  341. if (!ctrlpriv)
  342. return -ENOMEM;
  343. dev = &pdev->dev;
  344. dev_set_drvdata(dev, ctrlpriv);
  345. ctrlpriv->pdev = pdev;
  346. nprop = pdev->dev.of_node;
  347. /* Get configuration properties from device tree */
  348. /* First, get register page */
  349. ctrl = of_iomap(nprop, 0);
  350. if (ctrl == NULL) {
  351. dev_err(dev, "caam: of_iomap() failed\n");
  352. return -ENOMEM;
  353. }
  354. ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
  355. /* topregs used to derive pointers to CAAM sub-blocks only */
  356. topregs = (struct caam_full __iomem *)ctrl;
  357. /* Get the IRQ of the controller (for security violations only) */
  358. ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
  359. /*
  360. * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
  361. * long pointers in master configuration register
  362. */
  363. setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
  364. (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
  365. if (sizeof(dma_addr_t) == sizeof(u64))
  366. if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
  367. dma_set_mask(dev, DMA_BIT_MASK(40));
  368. else
  369. dma_set_mask(dev, DMA_BIT_MASK(36));
  370. else
  371. dma_set_mask(dev, DMA_BIT_MASK(32));
  372. /*
  373. * Detect and enable JobRs
  374. * First, find out how many ring spec'ed, allocate references
  375. * for all, then go probe each one.
  376. */
  377. rspec = 0;
  378. for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
  379. rspec++;
  380. if (!rspec) {
  381. /* for backward compatible with device trees */
  382. for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
  383. rspec++;
  384. }
  385. ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
  386. GFP_KERNEL);
  387. if (ctrlpriv->jrpdev == NULL) {
  388. iounmap(&topregs->ctrl);
  389. return -ENOMEM;
  390. }
  391. ring = 0;
  392. ctrlpriv->total_jobrs = 0;
  393. for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
  394. ctrlpriv->jrpdev[ring] =
  395. of_platform_device_create(np, NULL, dev);
  396. if (!ctrlpriv->jrpdev[ring]) {
  397. pr_warn("JR%d Platform device creation error\n", ring);
  398. continue;
  399. }
  400. ctrlpriv->total_jobrs++;
  401. ring++;
  402. }
  403. if (!ring) {
  404. for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
  405. ctrlpriv->jrpdev[ring] =
  406. of_platform_device_create(np, NULL, dev);
  407. if (!ctrlpriv->jrpdev[ring]) {
  408. pr_warn("JR%d Platform device creation error\n",
  409. ring);
  410. continue;
  411. }
  412. ctrlpriv->total_jobrs++;
  413. ring++;
  414. }
  415. }
  416. /* Check to see if QI present. If so, enable */
  417. ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
  418. CTPR_QI_MASK);
  419. if (ctrlpriv->qi_present) {
  420. ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
  421. /* This is all that's required to physically enable QI */
  422. wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
  423. }
  424. /* If no QI and no rings specified, quit and go home */
  425. if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
  426. dev_err(dev, "no queues configured, terminating\n");
  427. caam_remove(pdev);
  428. return -ENOMEM;
  429. }
  430. cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
  431. /*
  432. * If SEC has RNG version >= 4 and RNG state handle has not been
  433. * already instantiated, do RNG instantiation
  434. */
  435. if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
  436. ctrlpriv->rng4_sh_init =
  437. rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
  438. /*
  439. * If the secure keys (TDKEK, JDKEK, TDSK), were already
  440. * generated, signal this to the function that is instantiating
  441. * the state handles. An error would occur if RNG4 attempts
  442. * to regenerate these keys before the next POR.
  443. */
  444. gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
  445. ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
  446. do {
  447. int inst_handles =
  448. rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
  449. RDSTA_IFMASK;
  450. /*
  451. * If either SH were instantiated by somebody else
  452. * (e.g. u-boot) then it is assumed that the entropy
  453. * parameters are properly set and thus the function
  454. * setting these (kick_trng(...)) is skipped.
  455. * Also, if a handle was instantiated, do not change
  456. * the TRNG parameters.
  457. */
  458. if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
  459. kick_trng(pdev, ent_delay);
  460. ent_delay += 400;
  461. }
  462. /*
  463. * if instantiate_rng(...) fails, the loop will rerun
  464. * and the kick_trng(...) function will modfiy the
  465. * upper and lower limits of the entropy sampling
  466. * interval, leading to a sucessful initialization of
  467. * the RNG.
  468. */
  469. ret = instantiate_rng(dev, inst_handles,
  470. gen_sk);
  471. } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
  472. if (ret) {
  473. dev_err(dev, "failed to instantiate RNG");
  474. caam_remove(pdev);
  475. return ret;
  476. }
  477. /*
  478. * Set handles init'ed by this module as the complement of the
  479. * already initialized ones
  480. */
  481. ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
  482. /* Enable RDB bit so that RNG works faster */
  483. setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
  484. }
  485. /* NOTE: RTIC detection ought to go here, around Si time */
  486. caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
  487. /* Report "alive" for developer to see */
  488. dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
  489. caam_get_era(caam_id));
  490. dev_info(dev, "job rings = %d, qi = %d\n",
  491. ctrlpriv->total_jobrs, ctrlpriv->qi_present);
  492. #ifdef CONFIG_DEBUG_FS
  493. /*
  494. * FIXME: needs better naming distinction, as some amalgamation of
  495. * "caam" and nprop->full_name. The OF name isn't distinctive,
  496. * but does separate instances
  497. */
  498. perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
  499. ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
  500. ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
  501. /* Controller-level - performance monitor counters */
  502. ctrlpriv->ctl_rq_dequeued =
  503. debugfs_create_u64("rq_dequeued",
  504. S_IRUSR | S_IRGRP | S_IROTH,
  505. ctrlpriv->ctl, &perfmon->req_dequeued);
  506. ctrlpriv->ctl_ob_enc_req =
  507. debugfs_create_u64("ob_rq_encrypted",
  508. S_IRUSR | S_IRGRP | S_IROTH,
  509. ctrlpriv->ctl, &perfmon->ob_enc_req);
  510. ctrlpriv->ctl_ib_dec_req =
  511. debugfs_create_u64("ib_rq_decrypted",
  512. S_IRUSR | S_IRGRP | S_IROTH,
  513. ctrlpriv->ctl, &perfmon->ib_dec_req);
  514. ctrlpriv->ctl_ob_enc_bytes =
  515. debugfs_create_u64("ob_bytes_encrypted",
  516. S_IRUSR | S_IRGRP | S_IROTH,
  517. ctrlpriv->ctl, &perfmon->ob_enc_bytes);
  518. ctrlpriv->ctl_ob_prot_bytes =
  519. debugfs_create_u64("ob_bytes_protected",
  520. S_IRUSR | S_IRGRP | S_IROTH,
  521. ctrlpriv->ctl, &perfmon->ob_prot_bytes);
  522. ctrlpriv->ctl_ib_dec_bytes =
  523. debugfs_create_u64("ib_bytes_decrypted",
  524. S_IRUSR | S_IRGRP | S_IROTH,
  525. ctrlpriv->ctl, &perfmon->ib_dec_bytes);
  526. ctrlpriv->ctl_ib_valid_bytes =
  527. debugfs_create_u64("ib_bytes_validated",
  528. S_IRUSR | S_IRGRP | S_IROTH,
  529. ctrlpriv->ctl, &perfmon->ib_valid_bytes);
  530. /* Controller level - global status values */
  531. ctrlpriv->ctl_faultaddr =
  532. debugfs_create_u64("fault_addr",
  533. S_IRUSR | S_IRGRP | S_IROTH,
  534. ctrlpriv->ctl, &perfmon->faultaddr);
  535. ctrlpriv->ctl_faultdetail =
  536. debugfs_create_u32("fault_detail",
  537. S_IRUSR | S_IRGRP | S_IROTH,
  538. ctrlpriv->ctl, &perfmon->faultdetail);
  539. ctrlpriv->ctl_faultstatus =
  540. debugfs_create_u32("fault_status",
  541. S_IRUSR | S_IRGRP | S_IROTH,
  542. ctrlpriv->ctl, &perfmon->status);
  543. /* Internal covering keys (useful in non-secure mode only) */
  544. ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
  545. ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
  546. ctrlpriv->ctl_kek = debugfs_create_blob("kek",
  547. S_IRUSR |
  548. S_IRGRP | S_IROTH,
  549. ctrlpriv->ctl,
  550. &ctrlpriv->ctl_kek_wrap);
  551. ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
  552. ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
  553. ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
  554. S_IRUSR |
  555. S_IRGRP | S_IROTH,
  556. ctrlpriv->ctl,
  557. &ctrlpriv->ctl_tkek_wrap);
  558. ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
  559. ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
  560. ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
  561. S_IRUSR |
  562. S_IRGRP | S_IROTH,
  563. ctrlpriv->ctl,
  564. &ctrlpriv->ctl_tdsk_wrap);
  565. #endif
  566. return 0;
  567. }
  568. static struct of_device_id caam_match[] = {
  569. {
  570. .compatible = "fsl,sec-v4.0",
  571. },
  572. {
  573. .compatible = "fsl,sec4.0",
  574. },
  575. {},
  576. };
  577. MODULE_DEVICE_TABLE(of, caam_match);
  578. static struct platform_driver caam_driver = {
  579. .driver = {
  580. .name = "caam",
  581. .owner = THIS_MODULE,
  582. .of_match_table = caam_match,
  583. },
  584. .probe = caam_probe,
  585. .remove = caam_remove,
  586. };
  587. module_platform_driver(caam_driver);
  588. MODULE_LICENSE("GPL");
  589. MODULE_DESCRIPTION("FSL CAAM request backend");
  590. MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");