ctrl.c

/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
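
/*
 * Tear down the controller: shut down each job ring and release its IRQ
 * mapping, remove the debugfs hierarchy, unmap the register region and
 * free the driver-private data.
 */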
static int caam_remove(struct platform_device *pdev)
{
	struct device *ctrldev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_jr *jrpriv;
	struct caam_full __iomem *topregs;
	int ring, ret = 0;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);
	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;

	/* shut down JobRs */
	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
		ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
		jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
		irq_dispose_mapping(jrpriv->irq);
	}

	/* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

	/* Unmap controller region */
	iounmap(&topregs->ctrl);

	kfree(ctrlpriv->jrdev);
	kfree(ctrlpriv);

	return ret;
}

/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
static void build_instantiation_desc(u32 *desc)
{
	u32 *jump_cmd;

	init_job_desc(desc, 0);

	/* INIT RNG in non-test mode */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 OP_ALG_AS_INIT);

	/* wait for done */
	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
	set_jump_tgt_here(desc, jump_cmd);

	/*
	 * load 1 to clear written reg:
	 * resets the done interrupt and returns the RNG to idle.
	 */
	append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

	/* generate secure keys (non-test) */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 OP_ALG_RNG4_SK);
}

struct instantiate_result {
	struct completion completion;
	int err;
};
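
/*
 * Job-ring completion callback for the RNG4 instantiation descriptor:
 * log any error status, then record it and wake the waiter in
 * instantiate_rng().
 */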
static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
			   void *context)
{
	struct instantiate_result *instantiation = context;

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	instantiation->err = err;
	complete(&instantiation->completion);
}
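
/*
 * Build the RNG4 instantiation descriptor, map it for DMA, enqueue it on
 * the given job ring and sleep until rng4_init_done() reports the result.
 */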
static int instantiate_rng(struct device *jrdev)
{
	struct instantiate_result instantiation;
	dma_addr_t desc_dma;
	u32 *desc;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
		return -ENOMEM;
	}

	build_instantiation_desc(desc);

	desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, desc_dma)) {
		/* bail out if the descriptor could not be DMA-mapped */
		dev_err(jrdev, "unable to map RNG init descriptor\n");
		kfree(desc);
		return -ENOMEM;
	}

	init_completion(&instantiation.completion);
	ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
	if (!ret) {
		wait_for_completion_interruptible(&instantiation.completion);
		ret = instantiation.err;
		if (ret)
			dev_err(jrdev, "unable to instantiate RNG\n");
	}

	dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);

	kfree(desc);

	return ret;
}

/*
 * By default, the TRNG runs for 200 clocks per sample;
 * 800 clocks per sample generates better entropy.
 */
static void kick_trng(struct platform_device *pdev)
{
	struct device *ctrldev = &pdev->dev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_full __iomem *topregs;
	struct rng4tst __iomem *r4tst;
	u32 val;

	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
	r4tst = &topregs->ctrl.r4tst[0];

	/* put RNG4 into program mode */
	setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
	/* 800 clocks per sample */
	val = rd_reg32(&r4tst->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) | (800 << RTSDCTL_ENT_DLY_SHIFT);
	wr_reg32(&r4tst->rtsdctl, val);
	/* min. freq. count */
	wr_reg32(&r4tst->rtfrqmin, 400);
	/* max. freq. count */
	wr_reg32(&r4tst->rtfrqmax, 6400);
	/* put RNG4 into run mode */
	clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
}

/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
	int ret, ring, rspec;
	struct device *dev;
	struct device_node *nprop, *np;
	struct caam_ctrl __iomem *ctrl;
	struct caam_full __iomem *topregs;
	struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
	struct caam_perfmon *perfmon;
#endif

	ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
	if (!ctrlpriv)
		return -ENOMEM;

	dev = &pdev->dev;
	dev_set_drvdata(dev, ctrlpriv);
	ctrlpriv->pdev = pdev;
	nprop = pdev->dev.of_node;

	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = of_iomap(nprop, 0);
	if (ctrl == NULL) {
		dev_err(dev, "caam: of_iomap() failed\n");
		kfree(ctrlpriv);
		return -ENOMEM;
	}
	ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;

	/* topregs used to derive pointers to CAAM sub-blocks only */
	topregs = (struct caam_full __iomem *)ctrl;

	/* Get the IRQ of the controller (for security violations only) */
	ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);

	/*
	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
	 * long pointers in master configuration register
	 */
	setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
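
	/*
	 * Set the DMA mask according to the part: SEC v5.x supports 40-bit
	 * DMA addressing, earlier parts 36-bit; fall back to a 32-bit mask
	 * when dma_addr_t itself is only 32 bits wide.
	 */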
	if (sizeof(dma_addr_t) == sizeof(u64))
		if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
			dma_set_mask(dev, DMA_BIT_MASK(40));
		else
			dma_set_mask(dev, DMA_BIT_MASK(36));
	else
		dma_set_mask(dev, DMA_BIT_MASK(32));

	/*
	 * Detect and enable JobRs
	 * First, find out how many rings are spec'ed, allocate references
	 * for all, then go probe each one.
	 */
	rspec = 0;
	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
		rspec++;
	if (!rspec) {
		/* for backward compatibility with older device trees */
		for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
			rspec++;
	}

	ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
	if (ctrlpriv->jrdev == NULL) {
		iounmap(&topregs->ctrl);
		kfree(ctrlpriv);
		return -ENOMEM;
	}

	ring = 0;
	ctrlpriv->total_jobrs = 0;
	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
		caam_jr_probe(pdev, np, ring);
		ctrlpriv->total_jobrs++;
		ring++;
	}
	if (!ring) {
		for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
			caam_jr_probe(pdev, np, ring);
			ctrlpriv->total_jobrs++;
			ring++;
		}
	}

	/* Check to see if QI present. If so, enable */
	ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
				  CTPR_QI_MASK);
	if (ctrlpriv->qi_present) {
		ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
		/* This is all that's required to physically enable QI */
		wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
	}

	/* If no QI and no rings specified, quit and go home */
	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
		dev_err(dev, "no queues configured, terminating\n");
		caam_remove(pdev);
		return -ENOMEM;
	}

	/*
	 * RNG4 based SECs (v5+) need special initialization prior
	 * to executing any descriptors
	 */
	if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) {
		kick_trng(pdev);
		ret = instantiate_rng(ctrlpriv->jrdev[0]);
		if (ret) {
			caam_remove(pdev);
			return ret;
		}
	}

	/* NOTE: RTIC detection ought to go here, around Si time */

	/* Initialize queue allocator lock */
	spin_lock_init(&ctrlpriv->jr_alloc_lock);

	/* Report "alive" for developer to see */
	dev_info(dev, "device ID = 0x%016llx\n",
		 rd_reg64(&topregs->ctrl.perfmon.caam_id));
	dev_info(dev, "job rings = %d, qi = %d\n",
		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);

#ifdef CONFIG_DEBUG_FS
	/*
	 * FIXME: needs better naming distinction, as some amalgamation of
	 * "caam" and nprop->full_name. The OF name isn't distinctive,
	 * but does separate instances
	 */
	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

	ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);

	/* Controller-level - performance monitor counters */
	ctrlpriv->ctl_rq_dequeued =
		debugfs_create_u64("rq_dequeued",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->req_dequeued);
	ctrlpriv->ctl_ob_enc_req =
		debugfs_create_u64("ob_rq_encrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ob_enc_req);
	ctrlpriv->ctl_ib_dec_req =
		debugfs_create_u64("ib_rq_decrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ib_dec_req);
	ctrlpriv->ctl_ob_enc_bytes =
		debugfs_create_u64("ob_bytes_encrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ob_enc_bytes);
	ctrlpriv->ctl_ob_prot_bytes =
		debugfs_create_u64("ob_bytes_protected",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ob_prot_bytes);
	ctrlpriv->ctl_ib_dec_bytes =
		debugfs_create_u64("ib_bytes_decrypted",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ib_dec_bytes);
	ctrlpriv->ctl_ib_valid_bytes =
		debugfs_create_u64("ib_bytes_validated",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->ib_valid_bytes);

	/* Controller level - global status values */
	ctrlpriv->ctl_faultaddr =
		debugfs_create_u64("fault_addr",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->faultaddr);
	ctrlpriv->ctl_faultdetail =
		debugfs_create_u32("fault_detail",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->faultdetail);
	ctrlpriv->ctl_faultstatus =
		debugfs_create_u32("fault_status",
				   S_IRUSR | S_IRGRP | S_IROTH,
				   ctrlpriv->ctl, &perfmon->status);

	/* Internal covering keys (useful in non-secure mode only) */
	ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
						S_IRUSR |
						S_IRGRP | S_IROTH,
						ctrlpriv->ctl,
						&ctrlpriv->ctl_kek_wrap);

	ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
						 S_IRUSR |
						 S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tkek_wrap);

	ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
						 S_IRUSR |
						 S_IRGRP | S_IROTH,
						 ctrlpriv->ctl,
						 &ctrlpriv->ctl_tdsk_wrap);
#endif
	return 0;
}

static struct of_device_id caam_match[] = {
	{
		.compatible = "fsl,sec-v4.0",
	},
	{
		.compatible = "fsl,sec4.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_match);

static struct platform_driver caam_driver = {
	.driver = {
		.name = "caam",
		.owner = THIS_MODULE,
		.of_match_table = caam_match,
	},
	.probe = caam_probe,
	.remove = __devexit_p(caam_remove),
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");