hash.c
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"
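
/*
 * Iterator state used when building a TDMA chain for a hash request:
 * ->base tracks how much of the request has been consumed in SRAM-sized
 * operations, while ->src walks the source scatterlist (see
 * mv_cesa_ahash_dma_req_init() below).
 */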
struct mv_cesa_ahash_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
                            struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int len = req->nbytes + creq->cache_ptr;

        if (!creq->last_req)
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;

        mv_cesa_req_dma_iter_init(&iter->base, len);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
        iter->src.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
        req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
                                    &req->cache_dma);
        if (!req->cache)
                return -ENOMEM;

        return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->cache)
                return;

        dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
                      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
                                           gfp_t flags)
{
        if (req->padding)
                return 0;

        req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
                                      &req->padding_dma);
        if (!req->padding)
                return -ENOMEM;

        return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->padding)
                return;

        dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
                      req->padding_dma);
        req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
        mv_cesa_ahash_dma_free_cache(&creq->req.dma);
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_last_cleanup(req);
}
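
/*
 * MD5, SHA1 and SHA256 all share the same 64-byte block and trailer
 * layout: one 0x80 byte, zero padding up to 56 mod 64, then the message
 * length in bits as an 8-byte word. Worked example: with creq->len = 100,
 * index = 100 % 64 = 36, padlen = 56 - 36 = 20, and the full trailer is
 * 20 + 8 = 28 bytes, bringing the padded total to 128, a multiple of 64.
 */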
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
        unsigned int index, padlen;

        index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
        padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

        return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
        unsigned int padlen;

        buf[0] = 0x80;
        /* Pad out to 56 mod 64 */
        padlen = mv_cesa_ahash_pad_len(creq);
        memset(buf + 1, 0, padlen - 1);

        if (creq->algo_le) {
                __le64 bits = cpu_to_le64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        } else {
                __be64 bits = cpu_to_be64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        }

        return padlen + 8;
}
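
/*
 * Standard (non-DMA) processing: the CPU copies the operation descriptor
 * and the data into the engine SRAM, programs the initial digest words
 * into the IVDIG registers, then starts the accelerator and waits for
 * the completion interrupt. Requests larger than the SRAM payload are
 * processed as a chain of first/mid/last fragments.
 */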
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = creq->base.engine;
        struct mv_cesa_op_ctx *op;
        unsigned int new_cache_ptr = 0;
        u32 frag_mode;
        size_t len;
        unsigned int digsize;
        int i;

        mv_cesa_adjust_op(engine, &creq->op_tmpl);
        memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        for (i = 0; i < digsize / 4; i++)
                writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
        if (creq->cache_ptr)
                memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                            creq->cache, creq->cache_ptr);

        len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
                    CESA_SA_SRAM_PAYLOAD_SIZE);

        if (!creq->last_req) {
                new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;
        }

        if (len - creq->cache_ptr)
                sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
                                                   engine->sram +
                                                   CESA_SA_DATA_SRAM_OFFSET +
                                                   creq->cache_ptr,
                                                   len - creq->cache_ptr,
                                                   sreq->offset);

        op = &creq->op_tmpl;

        frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

        if (creq->last_req && sreq->offset == req->nbytes &&
            creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
                else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
        }

        if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
            frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
                if (len &&
                    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                        mv_cesa_set_mac_op_total_len(op, creq->len);
                } else {
                        int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

                        if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
                                len &= CESA_HASH_BLOCK_SIZE_MSK;
                                new_cache_ptr = 64 - trailerlen;
                                memcpy_fromio(creq->cache,
                                              engine->sram +
                                              CESA_SA_DATA_SRAM_OFFSET + len,
                                              new_cache_ptr);
                        } else {
                                len += mv_cesa_ahash_pad_req(creq,
                                                engine->sram + len +
                                                CESA_SA_DATA_SRAM_OFFSET);
                        }

                        if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
                                frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
                        else
                                frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
                }
        }

        mv_cesa_set_mac_op_frag_len(op, len);
        mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

        /* FIXME: only update enc_len field */
        memcpy_toio(engine->sram, op, sizeof(*op));

        if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->cache_ptr = new_cache_ptr;

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        BUG_ON(readl(engine->regs + CESA_SA_CMD) &
               CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        if (sreq->offset < (req->nbytes - creq->cache_ptr))
                return -EINPROGRESS;

        return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        sreq->offset = 0;
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_dma_step(&creq->base);
        else
                mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                return mv_cesa_dma_process(&creq->base, status);

        return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int digsize;
        int i;

        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
        for (i = 0; i < digsize / 4; i++)
                creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

        if (creq->last_req) {
                /*
                 * Hardware's MD5 digest is in little endian format, but
                 * SHA in big endian format
                 */
                if (creq->algo_le) {
                        __le32 *result = (void *)ahashreq->result;

                        for (i = 0; i < digsize / 4; i++)
                                result[i] = cpu_to_le32(creq->state[i]);
                } else {
                        __be32 *result = (void *)ahashreq->result;

                        for (i = 0; i < digsize / 4; i++)
                                result[i] = cpu_to_be32(creq->state[i]);
                }
        }

        atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
                                  struct mv_cesa_engine *engine)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_prepare(ahashreq);
        else
                mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (creq->last_req)
                mv_cesa_ahash_last_cleanup(ahashreq);

        mv_cesa_ahash_cleanup(ahashreq);

        if (creq->cache_ptr)
                sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
                                   creq->cache,
                                   creq->cache_ptr,
                                   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
        .step = mv_cesa_ahash_step,
        .process = mv_cesa_ahash_process,
        .cleanup = mv_cesa_ahash_req_cleanup,
        .complete = mv_cesa_ahash_complete,
};

static int mv_cesa_ahash_init(struct ahash_request *req,
                              struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        memset(creq, 0, sizeof(*creq));
        mv_cesa_update_op_cfg(tmpl,
                              CESA_SA_DESC_CFG_OP_MAC_ONLY |
                              CESA_SA_DESC_CFG_FIRST_FRAG,
                              CESA_SA_DESC_CFG_OP_MSK |
                              CESA_SA_DESC_CFG_FRAG_MSK);
        mv_cesa_set_mac_op_total_len(tmpl, 0);
        mv_cesa_set_mac_op_frag_len(tmpl, 0);
        creq->op_tmpl = *tmpl;
        creq->len = 0;
        creq->algo_le = algo_le;

        return 0;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}
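
/*
 * The engine consumes whole 64-byte blocks, so data that does not yet
 * fill a block is buffered in creq->cache and prepended to the next
 * update (or padded and flushed when the final request comes in).
 */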
static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
                *cached = true;

                if (!req->nbytes)
                        return 0;

                sg_pcopy_to_buffer(req->src, creq->src_nents,
                                   creq->cache + creq->cache_ptr,
                                   req->nbytes, 0);

                creq->cache_ptr += req->nbytes;
        }

        return 0;
}
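
/*
 * A multi-part hash walks through the fragment modes understood by the
 * engine: FIRST_FRAG (start from the algorithm's standard IV), MID_FRAG
 * (continue from the previous partial digest) and LAST_FRAG/NOT_FRAG
 * (apply the final padding). mv_cesa_dma_add_frag() emits one operation
 * descriptor plus a launch descriptor, and flips the template from
 * "first" to "mid" so the fragments that follow chain correctly.
 */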
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
                     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
                     gfp_t flags)
{
        struct mv_cesa_op_ctx *op;
        int ret;

        op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
        if (IS_ERR(op))
                return op;

        /* Set the operation block fragment length. */
        mv_cesa_set_mac_op_frag_len(op, frag_len);

        /* Append dummy desc to launch operation */
        ret = mv_cesa_dma_add_dummy_launch(chain, flags);
        if (ret)
                return ERR_PTR(ret);

        if (mv_cesa_mac_op_is_first_frag(tmpl))
                mv_cesa_update_op_cfg(tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
                            struct mv_cesa_ahash_dma_iter *dma_iter,
                            struct mv_cesa_ahash_req *creq,
                            gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        int ret;

        if (!creq->cache_ptr)
                return 0;

        ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
        if (ret)
                return ret;

        memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

        return mv_cesa_dma_add_data_transfer(chain,
                                             CESA_SA_DATA_SRAM_OFFSET,
                                             ahashdreq->cache_dma,
                                             creq->cache_ptr,
                                             CESA_TDMA_DST_IN_SRAM,
                                             flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
                           struct mv_cesa_ahash_dma_iter *dma_iter,
                           struct mv_cesa_ahash_req *creq,
                           unsigned int frag_len, gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        unsigned int len, trailerlen, padoff = 0;
        struct mv_cesa_op_ctx *op;
        int ret;

        /*
         * If the transfer is smaller than our maximum length, and we have
         * some data outstanding, we can ask the engine to finish the hash.
         */
        if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                mv_cesa_set_mac_op_total_len(op, creq->len);
                mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
                                                CESA_SA_DESC_CFG_NOT_FRAG :
                                                CESA_SA_DESC_CFG_LAST_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

                return op;
        }

        /*
         * The request is longer than the engine can handle, or we have
         * no data outstanding. Manually generate the padding, adding it
         * as a "mid" fragment.
         */
        ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
        if (ret)
                return ERR_PTR(ret);

        trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

        len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
        if (len) {
                ret = mv_cesa_dma_add_data_transfer(chain,
                                                    CESA_SA_DATA_SRAM_OFFSET +
                                                    frag_len,
                                                    ahashdreq->padding_dma,
                                                    len,
                                                    CESA_TDMA_DST_IN_SRAM,
                                                    flags);
                if (ret)
                        return ERR_PTR(ret);

                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl,
                                          frag_len + len, flags);
                if (IS_ERR(op))
                        return op;

                if (len == trailerlen)
                        return op;

                padoff += len;
        }

        ret = mv_cesa_dma_add_data_transfer(chain,
                                            CESA_SA_DATA_SRAM_OFFSET,
                                            ahashdreq->padding_dma +
                                            padoff,
                                            trailerlen - padoff,
                                            CESA_TDMA_DST_IN_SRAM,
                                            flags);
        if (ret)
                return ERR_PTR(ret);

        return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
                                    flags);
}
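
/*
 * Build the complete TDMA descriptor chain for one request: map the
 * source scatterlist, transfer any cached left-over bytes into SRAM,
 * then add data transfers with one operation + launch descriptor per
 * SRAM-sized chunk, and finish with either the closing operation (for
 * the last request) or a plain fragment covering the remaining data.
 */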
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
        int ret;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_ahash_req_iter_init(&iter, req);

        /*
         * Add the cache (left-over data from a previous block) first.
         * This will never overflow the SRAM size.
         */
        ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags);
        if (ret)
                goto err_free_tdma;

        if (iter.src.sg) {
                /*
                 * Add all the new data, inserting an operation block and
                 * launch command between each full SRAM block-worth of
                 * data. We intentionally do not add the final op block.
                 */
                while (true) {
                        ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
                                                           &iter.base,
                                                           &iter.src, flags);
                        if (ret)
                                goto err_free_tdma;

                        frag_len = iter.base.op_len;

                        if (!mv_cesa_ahash_req_iter_next_op(&iter))
                                break;

                        op = mv_cesa_dma_add_frag(&basereq->chain,
                                                  &creq->op_tmpl,
                                                  frag_len, flags);
                        if (IS_ERR(op)) {
                                ret = PTR_ERR(op);
                                goto err_free_tdma;
                        }
                }
        } else {
                /* Account for the data that was in the cache. */
                frag_len = iter.base.op_len;
        }

        /*
         * At this point, frag_len indicates whether we have any data
         * outstanding which needs an operation. Queue up the final
         * operation, which depends whether this is the final request.
         */
        if (creq->last_req)
                op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
                                                frag_len, flags);
        else if (frag_len)
                op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
                                          frag_len, flags);

        if (IS_ERR(op)) {
                ret = PTR_ERR(op);
                goto err_free_tdma;
        }

        if (op) {
                /* Add dummy desc to wait for crypto operation end */
                ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;
        }

        if (!creq->last_req)
                creq->cache_ptr = req->nbytes + creq->cache_ptr -
                                  iter.base.len;
        else
                creq->cache_ptr = 0;

        basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
                                       CESA_TDMA_BREAK_CHAIN);

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
        mv_cesa_ahash_last_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        int ret;

        creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG");
                return creq->src_nents;
        }

        ret = mv_cesa_ahash_cache_req(req, cached);
        if (ret)
                return ret;

        if (*cached)
                return 0;

        if (cesa_dev->caps->has_tdma)
                ret = mv_cesa_ahash_dma_req_init(req);

        return ret;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_engine *engine;
        bool cached = false;
        int ret;

        ret = mv_cesa_ahash_req_init(req, &cached);
        if (ret)
                return ret;

        if (cached)
                return 0;

        engine = mv_cesa_select_engine(req->nbytes);
        mv_cesa_ahash_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ahash_cleanup(req);

        return ret;
}
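
/*
 * The three entry points below map the crypto API semantics onto the
 * request machinery above: update() only accumulates data, final()
 * flushes the cache with padding and no new data (req->nbytes is forced
 * to 0), and finup() hashes the last chunk plus the padding in one go.
 */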
static int mv_cesa_ahash_update(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->len += req->nbytes;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;
        req->nbytes = 0;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        creq->len += req->nbytes;
        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;

        return mv_cesa_ahash_queue_req(req);
}
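
/*
 * export()/import() serialize a partially hashed state into the generic
 * md5_state/sha1_state/sha256_state structures so a request can be saved
 * and resumed later. On import, anything past a full block boundary goes
 * back into the cache.
 */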
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
                                u64 *len, void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;

        blocksize = crypto_ahash_blocksize(ahash);

        *len = creq->len;
        memcpy(hash, creq->state, digsize);
        memset(cache, 0, blocksize);
        memcpy(cache, creq->cache, creq->cache_ptr);

        return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
                                u64 len, const void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;
        unsigned int cache_ptr;
        int ret;

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        blocksize = crypto_ahash_blocksize(ahash);
        if (len >= blocksize)
                mv_cesa_update_op_cfg(&creq->op_tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->len = len;
        memcpy(creq->state, hash, digsize);
        creq->cache_ptr = 0;

        cache_ptr = do_div(len, blocksize);
        if (!cache_ptr)
                return 0;

        memcpy(creq->cache, cache, cache_ptr);
        creq->cache_ptr = cache_ptr;

        return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

        creq->state[0] = MD5_H0;
        creq->state[1] = MD5_H1;
        creq->state[2] = MD5_H2;
        creq->state[3] = MD5_H3;

        mv_cesa_ahash_init(req, &tmpl, true);

        return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
        struct md5_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->hash,
                                    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
        const struct md5_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
                                    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
        .init = mv_cesa_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_md5_digest,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "md5",
                        .cra_driver_name = "mv-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};
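
/*
 * Example (illustrative sketch only, not part of this driver): a kernel
 * consumer reaches this implementation through the generic ahash API.
 * "data" and "datalen" are hypothetical; since the algorithm is
 * CRYPTO_ALG_ASYNC, -EINPROGRESS/-EBUSY must be handled with a
 * completion, as mv_cesa_ahmac_setkey() does below.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[MD5_DIGEST_SIZE];
 *	int ret;
 *
 *	tfm = crypto_alloc_ahash("md5", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_crypt(req, &sg, digest, datalen);
 *	ret = crypto_ahash_digest(req);		// may return -EINPROGRESS
 *	// ... wait for the request callback before using "digest" ...
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */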
static int mv_cesa_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

        creq->state[0] = SHA1_H0;
        creq->state[1] = SHA1_H1;
        creq->state[2] = SHA1_H2;
        creq->state[3] = SHA1_H3;
        creq->state[4] = SHA1_H4;

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
        struct sha1_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
        const struct sha1_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
        .init = mv_cesa_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha1_digest,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "sha1",
                        .cra_driver_name = "mv-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

        creq->state[0] = SHA256_H0;
        creq->state[1] = SHA256_H1;
        creq->state[2] = SHA256_H2;
        creq->state[3] = SHA256_H3;
        creq->state[4] = SHA256_H4;
        creq->state[5] = SHA256_H5;
        creq->state[6] = SHA256_H6;
        creq->state[7] = SHA256_H7;

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
        struct sha256_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
        const struct sha256_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
        .init = mv_cesa_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha256_digest,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "sha256",
                        .cra_driver_name = "mv-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};
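
/*
 * HMAC support. HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), with
 * ipad = 0x36.. and opad = 0x5c.. repeated over one block. Instead of
 * hashing the padded key on every request, setkey() runs one block of
 * K ^ ipad and one of K ^ opad through the hash and stores the two
 * intermediate digests in ctx->iv; the per-algorithm init() helpers
 * below then load them as the engine's initial digest values.
 */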
struct mv_cesa_ahash_result {
        struct completion completion;
        int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
                                        int error)
{
        struct mv_cesa_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
                                       void *state, unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   mv_cesa_hmac_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(req, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        ret = crypto_ahash_update(req);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        ret = crypto_ahash_export(req, state);
        if (ret)
                return ret;

        return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
                                  const u8 *key, unsigned int keylen,
                                  u8 *ipad, u8 *opad,
                                  unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;
        int i;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           mv_cesa_hmac_ahash_complete,
                                           &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(req, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(req);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Set the memory region to 0 to avoid any leak. */
                memset(keydup, 0, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= 0x36;
                opad[i] ^= 0x5c;
        }

        return 0;
}

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
                                const u8 *key, unsigned int keylen,
                                void *istate, void *ostate)
{
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad = NULL;
        u8 *opad;
        int ret;

        tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);

        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_req;
        }

        opad = ipad + blocksize;

        ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
        kfree(ipad);
free_req:
        ahash_request_free(req);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, true);

        return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
                                    unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct md5_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
                ctx->iv[i] = be32_to_cpu(istate.hash[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

        return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
        .init = mv_cesa_ahmac_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_md5_digest,
        .setkey = mv_cesa_ahmac_md5_setkey,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "hmac(md5)",
                        .cra_driver_name = "mv-hmac-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha1_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
        .init = mv_cesa_ahmac_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha1_digest,
        .setkey = mv_cesa_ahmac_sha1_setkey,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "hmac(sha1)",
                        .cra_driver_name = "mv-hmac-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
                                       unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha256_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
        .init = mv_cesa_ahmac_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha256_digest,
        .setkey = mv_cesa_ahmac_sha256_setkey,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "hmac(sha256)",
                        .cra_driver_name = "mv-hmac-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};