mtk-aes.c

/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from the atmel-aes.c driver.
 */

#include <crypto/aes.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)

/* AES flags */
#define AES_FLAGS_CIPHER_MSK	GENMASK(2, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_GCM		BIT(3)
#define AES_FLAGS_ENCRYPT	BIT(4)
#define AES_FLAGS_BUSY		BIT(5)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))

/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * | AES KEY   | 128/192/256 bits
 * |-----------|
 * | HASH KEY  | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * | IVs       | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to perform:
 * - command decoding and control of the engine's data path,
 * - coordination of hardware data fetch and store operations,
 * - result token construction and output.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};

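/*
 * See mtk_aes_setkey(), mtk_aes_gcm_setkey() and the *_info_init() helpers
 * below: @state is filled in 32-bit words as
 *
 *   state[0 .. keylen-1]          AES key (keylen = SIZE_IN_WORDS(key size))
 *   state[keylen .. keylen+3]     IV/counter (CBC/CTR) or hash key H (GCM)
 *   state[keylen+4 .. keylen+6]   96-bit GCM IV
 */
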
struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx	base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32	iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};

struct mtk_aes_gcm_setkey_result {
	int err;
	struct completion completion;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}

static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}

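/*
 * A scatterlist is handed to the DMA engine directly only when the total
 * length is a multiple of AES_BLOCK_SIZE, every entry starts on a 32-bit
 * boundary and every entry except the last covers whole blocks; otherwise
 * mtk_aes_dma()/mtk_aes_gcm_dma() below bounce the data through the
 * record's aligned buffer.  For example, mtk_aes_padlen(20) returns 12,
 * rounding a 20-byte CTR request up to a 32-byte (two-block) DMA buffer.
 */
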
static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}

static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for the authentication tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}

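/*
 * Completion path: the ring interrupt handled by mtk_aes_irq() below
 * schedules done_task; mtk_aes_done_task() then unmaps the DMA buffers and
 * invokes the per-request resume callback (plain completion, CTR
 * continuation, or GCM tag verification).
 */
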
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}

static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}

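/*
 * For CBC and CTR the 16-byte IV/counter is appended to the key in the
 * transform state (for CTR it is refreshed by mtk_aes_ctr_transfer()
 * before each pass), which is why AES_TFM_SIZE() grows by
 * SIZE_IN_WORDS(AES_BLOCK_SIZE) above. ECB carries no IV, so it skips both
 * the extra size and the AES_CMD2 command word.
 */
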
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}

static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}

static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->nbytes)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->nbytes - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check for 32-bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr |= 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}

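/*
 * Fragmentation example: with iv[3] = 0xfffffffd and six blocks left,
 * "end" wraps below "start", so this pass is limited to
 * AES_BLOCK_SIZE * -start = 3 blocks (counters 0xfffffffd..0xffffffff).
 * iv[3] is then set to 0xffffffff and crypto_inc() carries the overflow
 * into iv[2] before mtk_aes_ctr_transfer() is resumed for the remainder.
 */
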
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check and set the AES key in the transform state buffer */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);

	return 0;
}

static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx;
	struct mtk_aes_reqctx *rctx;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

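/*
 * The second argument of mtk_aes_handle_queue() selects the record (and
 * thus the ring): here encryption requests go to record 0 and decryption
 * requests to record 1, so the two directions can be processed in parallel
 * (see the comment above mtk_aes_record_init() below).
 */
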
static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_cbc_encrypt,
		.decrypt	= mtk_aes_cbc_decrypt,
		.ivsize		= AES_BLOCK_SIZE,
	}
},
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ecb_encrypt,
		.decrypt	= mtk_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_ctr_cra_init,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ctr_encrypt,
		.decrypt	= mtk_aes_ctr_decrypt,
	}
},
};

static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just need
 * to check the returned status stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}

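/*
 * The transform programs a 96-bit IV (AES_TFM_3IV, IV words 0-2) and lets
 * the hardware initialize the block counter to 1 (AES_TFM_CTR_INIT), which
 * matches GCM's initial counter block J0 = IV || 0^31 || 1 for 96-bit IVs.
 */
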
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}

/* Todo: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Compute text length. */
		gctx->textlen = req->cryptlen;
		/*
		 * The hardware will append the authentication tag to the
		 * output buffer.
		 */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
		gctx->textlen = req->cryptlen - gctx->authsize;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}

static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
{
	struct mtk_aes_gcm_setkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;

	result->err = err;
	complete(&result->completion);
}

/*
 * Because of a hardware limitation, we need to pre-compute the hash key (H)
 * for the GHASH operation. The result of that encryption needs to be stored
 * in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[8];

		struct mtk_aes_gcm_setkey_result result;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	init_completion(&data->result.completion);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      mtk_gcm_setkey_done, &data->result);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_skcipher_encrypt(&data->req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		err = wait_for_completion_interruptible(
			&data->result.completion);
		if (!err)
			err = data->result.err;
	}
	if (err)
		goto out;

	/* Write key into state buffer */
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
	/* Write key(H) into state buffer */
	mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);
	return err;
}

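/*
 * The request above encrypts a single all-zero block with a zero counter
 * (data is allocated with kzalloc(), so hash[] and iv[] start out zeroed),
 * which yields H = E_K(0^128), the hash key required by GHASH. It is
 * stored big-endian right after the AES key in the state buffer.
 */
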
static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= 12,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};

static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}

static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is to process
 * outbound and inbound data in parallel. This can improve performance in
 * most use cases, such as IPsec VPN, especially under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		/* Free the whole AES_BUF_ORDER allocation, not just one page. */
		free_pages((unsigned long)aes[i]->buf, AES_BUF_ORDER);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		/* Buffers were allocated with __get_free_pages(AES_BUF_ORDER). */
		free_pages((unsigned long)cryp->aes[i]->buf, AES_BUF_ORDER);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_alg(&aes_algs[i]);

	return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}