mtk-aes.c

/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from the atmel-aes.c driver.
 */
#include <crypto/aes.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))

/* AES command token size */
#define AES_CT_SIZE_ECB		2
#define AES_CT_SIZE_CBC		3
#define AES_CT_SIZE_CTR		3
#define AES_CT_SIZE_GCM_OUT	5
#define AES_CT_SIZE_GCM_IN	6
#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)

/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)

/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)
#define AES_TFM_GHASH_DIG	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)

/* AES flags */
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_GCM		BIT(3)
#define AES_FLAGS_ENCRYPT	BIT(4)
#define AES_FLAGS_BUSY		BIT(5)
/**
 * The command token (CT) is a set of hardware instructions that
 * are used to control the engine's processing flow of AES.
 *
 * The transform information (TFM) defines the AES state and
 * contains all keys and initial vectors.
 *
 * The engine requires CT and TFM to do:
 * - Command decoding and control of the engine's data path.
 * - Coordinating hardware data fetch and store operations.
 * - Result token construction and output.
 *
 * Memory map of GCM's TFM:
 * /-----------\
 * | AES KEY   | 128/192/256 bits
 * |-----------|
 * | HASH KEY  | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * | IVs       | 4 * 4 bytes
 * \-----------/
 */
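/*
 * For illustration only (derived from mtk_aes_gcm_setkey() and
 * mtk_aes_gcm_info_init() below): with a 128-bit key and the usual
 * 96-bit GCM IV, tfm.state[] holds the AES key in words 0-3, the
 * hash key H in words 4-7 and the IV in words 8-10.
 */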
struct mtk_aes_ct {
	__le32 cmd[AES_CT_SIZE_GCM_IN];
};

struct mtk_aes_tfm {
	__le32 ctrl[2];
	__le32 state[SIZE_IN_WORDS(AES_KEYSIZE_256 + AES_BLOCK_SIZE * 2)];
};

struct mtk_aes_reqctx {
	u64 mode;
};
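/*
 * Per-transform base context. 'ct' and 'tfm' hold the command token and
 * transform information that are DMA-mapped to 'ct_dma'/'tfm_dma' for each
 * request; 'keylen' is the key length in 32-bit words and 'start' is the
 * mode-specific entry point invoked by mtk_aes_handle_queue().
 */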
struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	mtk_aes_fn start;

	struct mtk_aes_ct ct;
	dma_addr_t ct_dma;
	struct mtk_aes_tfm tfm;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};

struct mtk_aes_gcm_setkey_result {
	int err;
	struct completion completion;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};
static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}
static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}
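/*
 * Check that the scatterlist is word-aligned and that 'len' bytes of it are
 * block-aligned. On success, trim the final entry so the list ends exactly
 * at 'len' and record the trimmed remainder so mtk_aes_restore_sg() can undo
 * the trim once the transfer completes.
 */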
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}

static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}
/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_base + ring->cmd_pos;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		if (++ring->cmd_pos == MTK_DESC_NUM)
			ring->cmd_pos = 0;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_base + ring->res_pos;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		if (++ring->res_pos == MTK_DESC_NUM)
			ring->res_pos = 0;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Prepare enough space for the authentication tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}
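/*
 * Unmap the command token, transform information and data scatterlists,
 * restore any scatterlist entry trimmed by mtk_aes_check_aligned(), and
 * copy the result back to the real destination when the internal bounce
 * buffer was used.
 */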
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
			 DMA_TO_DEVICE);
	dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	ctx->ct_dma = dma_map_single(cryp->dev, &ctx->ct, sizeof(ctx->ct),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		return -EINVAL;

	ctx->tfm_dma = dma_map_single(cryp->dev, &ctx->tfm, sizeof(ctx->tfm),
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->tfm_dma)))
		goto tfm_map_err;

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
			 DMA_TO_DEVICE);
tfm_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
			 DMA_TO_DEVICE);

	return -EINVAL;
}
/* Initialize transform information of CBC/ECB/CTR mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len);
	ctx->ct.cmd[1] = AES_CMD1;

	if (aes->flags & AES_FLAGS_ENCRYPT)
		ctx->tfm.ctrl[0] = AES_TFM_BASIC_OUT;
	else
		ctx->tfm.ctrl[0] = AES_TFM_BASIC_IN;

	if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
		ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
		ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
	else
		ctx->tfm.ctrl[0] |= AES_TFM_192BITS;

	if (aes->flags & AES_FLAGS_CBC) {
		const u32 *iv = (const u32 *)req->info;
		u32 *iv_state = ctx->tfm.state + ctx->keylen;
		int i;

		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
				    SIZE_IN_WORDS(AES_BLOCK_SIZE));
		ctx->tfm.ctrl[1] = AES_TFM_CBC | AES_TFM_FULL_IV;

		for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
			iv_state[i] = cpu_to_le32(iv[i]);

		ctx->ct.cmd[2] = AES_CMD2;
		ctx->ct_size = AES_CT_SIZE_CBC;
	} else if (aes->flags & AES_FLAGS_ECB) {
		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen);
		ctx->tfm.ctrl[1] = AES_TFM_ECB;

		ctx->ct_size = AES_CT_SIZE_ECB;
	} else if (aes->flags & AES_FLAGS_CTR) {
		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
				    SIZE_IN_WORDS(AES_BLOCK_SIZE));
		ctx->tfm.ctrl[1] = AES_TFM_CTR_LOAD | AES_TFM_FULL_IV;

		ctx->ct.cmd[2] = AES_CMD2;
		ctx->ct_size = AES_CT_SIZE_CTR;
	}
}
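/*
 * Prepare the source/destination scatterlists for one transfer. If either
 * side is not suitably aligned, the data is bounced through the
 * pre-allocated 'aes->buf' pages (padded to a block boundary) before the
 * transform information is built and everything is DMA-mapped.
 */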
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return -ENOMEM;

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}
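/*
 * Enqueue a new request (if any) on the ring selected by 'id'. When the
 * ring is idle, dequeue the next request, notify any backlogged request
 * and hand the work off to the mode-specific ->start() handler.
 */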
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_complete(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, 0);

	/* Handle new request */
	return mtk_aes_handle_queue(cryp, aes->id, NULL);
}
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}

static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	int i;
	u32 start, end, ctr, blocks, *iv_state;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->nbytes)
		return mtk_aes_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->nbytes - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr |= 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	iv_state = ctx->tfm.state + ctx->keylen;
	for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
		iv_state[i] = cpu_to_le32(cctx->iv[i]);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}

static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;

	return mtk_aes_ctr_transfer(cryp, aes);
}
/* Check the key length and write the AES key into the transform state buffer */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	const u32 *aes_key = (const u32 *)key;
	u32 *key_state = ctx->tfm.state;
	int i;

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	for (i = 0; i < ctx->keylen; i++)
		key_state[i] = cpu_to_le32(aes_key[i]);

	return 0;
}
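/*
 * Queue a cipher request. With this ring selection, encryption requests are
 * handled by ring 0 and decryption requests by ring 1, matching the records
 * set up in mtk_cipher_alg_register().
 */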
static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx;
	struct mtk_aes_reqctx *rctx;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_cbc_encrypt,
		.decrypt	= mtk_aes_cbc_decrypt,
		.ivsize		= AES_BLOCK_SIZE,
	}
},
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ecb_encrypt,
		.decrypt	= mtk_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_ctr_cra_init,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ctr_encrypt,
		.decrypt	= mtk_aes_ctr_decrypt,
	}
},
};
static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	const u32 *iv = (const u32 *)req->iv;
	u32 *iv_state = ctx->tfm.state + ctx->keylen +
			SIZE_IN_WORDS(AES_BLOCK_SIZE);
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	int i;

	ctx->ct_hdr = AES_CT_CTRL_HDR | len;

	ctx->ct.cmd[0] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	ctx->ct.cmd[1] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	ctx->ct.cmd[2] = AES_GCM_CMD2;
	ctx->ct.cmd[3] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		ctx->ct.cmd[4] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		ctx->ct_size = AES_CT_SIZE_GCM_OUT;
		ctx->tfm.ctrl[0] = AES_TFM_GCM_OUT;
	} else {
		ctx->ct.cmd[4] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		ctx->ct.cmd[5] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		ctx->ct_size = AES_CT_SIZE_GCM_IN;
		ctx->tfm.ctrl[0] = AES_TFM_GCM_IN;
	}

	if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
		ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
		ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
	else
		ctx->tfm.ctrl[0] |= AES_TFM_192BITS;

	ctx->tfm.ctrl[0] |= AES_TFM_GHASH_DIG | AES_TFM_GHASH |
			    AES_TFM_SIZE(ctx->keylen + SIZE_IN_WORDS(
			    AES_BLOCK_SIZE + ivsize));
	ctx->tfm.ctrl[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE |
			   AES_TFM_3IV | AES_TFM_ENC_HASH;

	for (i = 0; i < SIZE_IN_WORDS(ivsize); i++)
		iv_state[i] = cpu_to_le32(iv[i]);
}
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return -ENOMEM;

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}
/* Todo: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Compute text length. */
		gctx->textlen = req->cryptlen;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->total = len;
		gctx->textlen = req->cryptlen - gctx->authsize;
	}
	aes->resume = mtk_aes_complete;

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
{
	struct mtk_aes_gcm_setkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;

	result->err = err;
	complete(&result->completion);
}
/*
 * Because of a hardware limitation, we need to pre-compute the hash key (H)
 * for the GHASH operation. The result of the encryption operation needs to
 * be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[8];

		struct mtk_aes_gcm_setkey_result result;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	const u32 *aes_key;
	u32 *key_state, *hash_state;
	int err, i;

	if (keylen != AES_KEYSIZE_256 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_128) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	key_state = ctx->tfm.state;
	aes_key = (u32 *)key;
	ctx->keylen = SIZE_IN_WORDS(keylen);

	for (i = 0; i < ctx->keylen; i++)
		ctx->tfm.state[i] = cpu_to_le32(aes_key[i]);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	init_completion(&data->result.completion);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      mtk_gcm_setkey_done, &data->result);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_skcipher_encrypt(&data->req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		err = wait_for_completion_interruptible(
			&data->result.completion);
		if (!err)
			err = data->result.err;
	}
	if (err)
		goto out;

	hash_state = key_state + ctx->keylen;

	for (i = 0; i < 4; i++)
		hash_state[i] = cpu_to_be32(data->hash[i]);
out:
	kzfree(data);
	return err;
}
static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= 12,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};
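/*
 * Tasklets scheduled from the ring interrupt handlers below: they unmap the
 * DMA buffers of the completed transfer and call the record's ->resume()
 * hook to either finish the request or continue a split CTR transfer.
 */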
static void mtk_aes_enc_task(unsigned long data)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
	struct mtk_aes_rec *aes = cryp->aes[0];

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}

static void mtk_aes_dec_task(unsigned long data)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
	struct mtk_aes_rec *aes = cryp->aes[1];

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}

static irqreturn_t mtk_aes_enc_irq(int irq, void *dev_id)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
	struct mtk_aes_rec *aes = cryp->aes[0];
	u32 val = mtk_aes_read(cryp, RDR_STAT(RING0));

	mtk_aes_write(cryp, RDR_STAT(RING0), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(RING0), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(RING0),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

static irqreturn_t mtk_aes_dec_irq(int irq, void *dev_id)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
	struct mtk_aes_rec *aes = cryp->aes[1];
	u32 val = mtk_aes_read(cryp, RDR_STAT(RING1));

	mtk_aes_write(cryp, RDR_STAT(RING1), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(RING1), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(RING1),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}
/*
 * The purpose of creating encryption and decryption records is
 * to process outbound/inbound data in parallel, which improves
 * performance in most use cases, such as IPsec VPN, especially
 * under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->id = i;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
	}

	tasklet_init(&aes[0]->task, mtk_aes_enc_task, (unsigned long)cryp);
	tasklet_init(&aes[1]->task, mtk_aes_dec_task, (unsigned long)cryp);

	return 0;

err_cleanup:
	for (; i--; ) {
		/* Free with the same order used for the allocation above */
		free_pages((unsigned long)aes[i]->buf, AES_BUF_ORDER);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->task);
		free_pages((unsigned long)cryp->aes[i]->buf, AES_BUF_ORDER);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_alg(&aes_algs[i]);

	return err;
}
int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	/* Ring0 is used by the encryption record */
	ret = devm_request_irq(cryp->dev, cryp->irq[RING0], mtk_aes_enc_irq,
			       IRQF_TRIGGER_LOW, "mtk-aes", cryp);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES encryption irq.\n");
		goto err_res;
	}

	/* Ring1 is used by the decryption record */
	ret = devm_request_irq(cryp->dev, cryp->irq[RING1], mtk_aes_dec_irq,
			       IRQF_TRIGGER_LOW, "mtk-aes", cryp);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES decryption irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupts */
	mtk_aes_write(cryp, AIC_ENABLE_SET(RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}