/* cc_ivgen.c - CryptoCell IV-pool generation (extraction banner removed) */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
  3. #include <crypto/ctr.h>
  4. #include "cc_driver.h"
  5. #include "cc_ivgen.h"
  6. #include "cc_request_mgr.h"
  7. #include "cc_sram_mgr.h"
  8. #include "cc_buffer_mgr.h"
  9. /* The max. size of pool *MUST* be <= SRAM total size */
  10. #define CC_IVPOOL_SIZE 1024
  11. /* The first 32B fraction of pool are dedicated to the
  12. * next encryption "key" & "IV" for pool regeneration
  13. */
  14. #define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
  15. #define CC_IVPOOL_GEN_SEQ_LEN 4
/**
 * struct cc_ivgen_ctx - IV pool generation context
 * @pool: the start address of the iv-pool resides in internal RAM
 * @ctr_key: SRAM address of the pool's AES-CTR encryption key material
 * @ctr_iv: SRAM address of the pool's counter IV
 * @next_iv_ofs: the offset to the next available IV in pool
 * @pool_meta: virt. address of the initial enc. key/IV
 * @pool_meta_dma: phys. address of the initial enc. key/IV
 */
struct cc_ivgen_ctx {
	cc_sram_addr_t pool;
	cc_sram_addr_t ctr_key;
	cc_sram_addr_t ctr_iv;
	u32 next_iv_ofs;
	u8 *pool_meta;
	dma_addr_t pool_meta_dma;
};
/*!
 * Generates CC_IVPOOL_SIZE of random bytes by
 * encrypting 0's using AES128-CTR.
 *
 * Note: this only *appends* CC_IVPOOL_GEN_SEQ_LEN HW descriptors to
 * @iv_seq; the caller is responsible for sending the sequence to the
 * engine. The descriptor order (key, state, dummy block, pool) is
 * significant and must not be changed.
 *
 * \param ivgen_ctx iv-pool context
 * \param iv_seq IN/OUT array to the descriptors sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 *
 * \return int Zero for success, -EINVAL if the sequence would exceed
 *         CC_IVPOOL_SEQ_LEN.
 */
static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
			  struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
	unsigned int idx = *iv_seq_len;

	if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
		/* The sequence will be longer than allowed */
		return -EINVAL;
	}

	/* Setup key: load the AES-128 key from its SRAM location */
	hw_desc_init(&iv_seq[idx]);
	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
	set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
	idx++;

	/* Setup cipher state: load the CTR counter IV from SRAM */
	hw_desc_init(&iv_seq[idx]);
	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
	set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
	idx++;

	/* Perform dummy encrypt to skip first block */
	hw_desc_init(&iv_seq[idx]);
	set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
	idx++;

	/* Generate IV pool: encrypt CC_IVPOOL_SIZE zero bytes into SRAM */
	hw_desc_init(&iv_seq[idx]);
	set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
	idx++;

	*iv_seq_len = idx; /* Update sequence length */

	/* queue ordering assures pool readiness */
	ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;

	return 0;
}
/*!
 * Generates the initial pool in SRAM.
 * This function should be invoked when resuming the driver.
 *
 * Seeds pool_meta with fresh random key/IV material, queues a BYPASS
 * descriptor that copies it into the first 32B of the SRAM pool, then
 * appends the pool-generation sequence and sends the whole request.
 *
 * \param drvdata Driver's private context
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_init_iv_sram(struct cc_drvdata *drvdata)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
	unsigned int iv_seq_len = 0;
	int rc;

	/* Generate initial enc. key/iv */
	get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);

	/* The first 32B of the pool are reserved for the enc. key/IV */
	ivgen_ctx->ctr_key = ivgen_ctx->pool;
	ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;

	/* Copy initial enc. key and IV to SRAM at a single descriptor */
	hw_desc_init(&iv_seq[iv_seq_len]);
	set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
		     CC_IVPOOL_META_SIZE, NS_BIT);
	set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
		      CC_IVPOOL_META_SIZE);
	set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
	iv_seq_len++;

	/* Generate initial pool */
	rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
	if (rc)
		return rc;

	/* Fire-and-forget */
	return send_request_init(drvdata, iv_seq, iv_seq_len);
}
  118. /*!
  119. * Free iv-pool and ivgen context.
  120. *
  121. * \param drvdata
  122. */
  123. void cc_ivgen_fini(struct cc_drvdata *drvdata)
  124. {
  125. struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
  126. struct device *device = &drvdata->plat_dev->dev;
  127. if (!ivgen_ctx)
  128. return;
  129. if (ivgen_ctx->pool_meta) {
  130. memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
  131. dma_free_coherent(device, CC_IVPOOL_META_SIZE,
  132. ivgen_ctx->pool_meta,
  133. ivgen_ctx->pool_meta_dma);
  134. }
  135. ivgen_ctx->pool = NULL_SRAM_ADDR;
  136. /* release "this" context */
  137. kfree(ivgen_ctx);
  138. }
  139. /*!
  140. * Allocates iv-pool and maps resources.
  141. * This function generates the first IV pool.
  142. *
  143. * \param drvdata Driver's private context
  144. *
  145. * \return int Zero for success, negative value otherwise.
  146. */
  147. int cc_ivgen_init(struct cc_drvdata *drvdata)
  148. {
  149. struct cc_ivgen_ctx *ivgen_ctx;
  150. struct device *device = &drvdata->plat_dev->dev;
  151. int rc;
  152. /* Allocate "this" context */
  153. ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
  154. if (!ivgen_ctx)
  155. return -ENOMEM;
  156. /* Allocate pool's header for initial enc. key/IV */
  157. ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
  158. &ivgen_ctx->pool_meta_dma,
  159. GFP_KERNEL);
  160. if (!ivgen_ctx->pool_meta) {
  161. dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
  162. CC_IVPOOL_META_SIZE);
  163. rc = -ENOMEM;
  164. goto out;
  165. }
  166. /* Allocate IV pool in SRAM */
  167. ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
  168. if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
  169. dev_err(device, "SRAM pool exhausted\n");
  170. rc = -ENOMEM;
  171. goto out;
  172. }
  173. drvdata->ivgen_handle = ivgen_ctx;
  174. return cc_init_iv_sram(drvdata);
  175. out:
  176. cc_ivgen_fini(drvdata);
  177. return rc;
  178. }
/*!
 * Acquires 16 Bytes IV from the iv-pool
 *
 * Appends one BYPASS copy descriptor per requested IV (all IVs in a
 * call are read from the same pool offset), plus a no-DMA completion
 * descriptor acting as a write barrier. If the pool runs low, the
 * regeneration sequence is appended as well.
 *
 * \param drvdata Driver private context
 * \param iv_out_dma Array of physical IV out addresses
 * \param iv_out_dma_len Length of iv_out_dma array (additional elements
 *        of iv_out_dma array are ignored)
 * \param iv_out_size May be 8 or 16 bytes long
 * \param iv_seq IN/OUT array to the descriptors sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
	      unsigned int iv_out_dma_len, unsigned int iv_out_size,
	      struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	unsigned int idx = *iv_seq_len;
	struct device *dev = drvdata_to_dev(drvdata);
	unsigned int t;

	/* Only full AES IVs or RFC3686 CTR nonce-size IVs are supported */
	if (iv_out_size != CC_AES_IV_SIZE &&
	    iv_out_size != CTR_RFC3686_IV_SIZE) {
		return -EINVAL;
	}

	/* +1 accounts for the trailing completion descriptor below */
	if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
		/* The sequence will be longer than allowed */
		return -EINVAL;
	}

	/* check that number of generated IV is limited to max dma address
	 * iv buffer size
	 */
	if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
		/* The sequence will be longer than allowed */
		return -EINVAL;
	}

	for (t = 0; t < iv_out_dma_len; t++) {
		/* Acquire IV from pool: copy SRAM pool slot to each
		 * destination DMA address
		 */
		hw_desc_init(&iv_seq[idx]);
		set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
					    ivgen_ctx->next_iv_ofs),
			     iv_out_size);
		set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
			      NS_BIT, 0);
		set_flow_mode(&iv_seq[idx], BYPASS);
		idx++;
	}

	/* Bypass operation is proceeded by crypto sequence, hence must
	 * assure bypass-write-transaction by a memory barrier
	 */
	hw_desc_init(&iv_seq[idx]);
	set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
	set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
	idx++;

	*iv_seq_len = idx; /* update seq length */

	/* Update iv index */
	ivgen_ctx->next_iv_ofs += iv_out_size;

	/* Regenerate when fewer than one full AES IV remains in the pool */
	if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
		dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
		/* pool is drained -regenerate it! */
		return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
	}

	return 0;
}