safexcel_hash.c

/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"
struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;

	u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
	bool last_req;
	bool finish;
	bool hmac;
	bool needs_inv;

	int nents;
	dma_addr_t result_dma;

	u32 digest;

	u8 state_sz;    /* expected state size, only set once */
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

	u64 len;
	u64 processed;

	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
	dma_addr_t cache_dma;
	unsigned int cache_sz;

	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};
struct safexcel_ahash_export_state {
	u64 len;
	u64 processed;

	u32 digest;

	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
	u8 cache[SHA256_BLOCK_SIZE];
};
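
/*
 * Build the two instruction tokens of the command descriptor: a DIRECTION
 * token feeding input_length bytes to the hash unit, followed by an INSERT
 * token writing result_length bytes of digest to the output.
 */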
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
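
/*
 * Fill in the control words of the first command descriptor (hash algorithm,
 * digest type, context size) and load the context record with either the
 * current intermediate state (precomputed digests) or the HMAC ipad/opad.
 */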
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize,
				     unsigned int blocksize)
{
	int i;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= req->digest;

	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (req->processed) {
			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
		} else {
			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
		}

		if (!req->finish)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

		/*
		 * Copy the input digest if needed, and setup the context
		 * fields. Do this now as we need it to setup the first command
		 * descriptor.
		 */
		if (req->processed) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			if (req->finish)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
		}
	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));

		memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
		memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
		       ctx->opad, req->state_sz);
	}
}
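
/*
 * Completion handler for a regular hash request: pop the result descriptor,
 * unmap the source, result and cache DMA buffers, copy the digest back to
 * the caller when finishing, and carry leftover bytes over to the cache.
 */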
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	int cache_len;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		*ret = -EINVAL;
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (sreq->nents) {
		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
		sreq->nents = 0;
	}

	if (sreq->result_dma) {
		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
				 DMA_FROM_DEVICE);
		sreq->result_dma = 0;
	}

	if (sreq->cache_dma) {
		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
				 DMA_TO_DEVICE);
		sreq->cache_dma = 0;
	}

	if (sreq->finish)
		memcpy(areq->result, sreq->state,
		       crypto_ahash_digestsize(ahash));

	cache_len = sreq->len - sreq->processed;
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}
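
/*
 * Build the command and result descriptors for a hash request: one command
 * descriptor for the cached data (if any), one per source scatterlist entry,
 * plus a single result descriptor for the state. Data that does not fill a
 * whole block is copied into cache_next for the following send() call.
 */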
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
				   struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

	queued = len = req->len - req->processed;
	if (queued <= crypto_ahash_blocksize(ahash))
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	if (!req->last_req) {
		/* If this is not the last request and the queued data does not
		 * fit into full blocks, cache it for the next send() call.
		 */
		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
		if (!extra)
			/* If this is not the last request and the queued data
			 * is a multiple of a block, cache the last one for now.
			 */
			extra = crypto_ahash_blocksize(ahash);

		if (extra) {
			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req->cache_next, extra,
					   areq->nbytes - extra);

			queued -= extra;
			len -= extra;

			if (!queued) {
				*commands = 0;
				*results = 0;
				return 0;
			}
		}
	}

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		req->cache_dma = dma_map_single(priv->dev, req->cache,
						cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, req->cache_dma)) {
			spin_unlock_bh(&priv->ring[ring].egress_lock);
			return -EINVAL;
		}

		req->cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 req->cache_dma, cache_len, len,
						 ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}
		n_cdesc++;

		queued -= cache_len;
		if (!queued)
			goto send_command;
	}

	/* Now handle the current ahash request buffer(s) */
	req->nents = dma_map_sg(priv->dev, areq->src,
				sg_nents_for_len(areq->src, areq->nbytes),
				DMA_TO_DEVICE);
	if (!req->nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, req->nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - sglen < 0)
			sglen = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen), sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto unmap_sg;
		}
		n_cdesc++;

		if (n_cdesc == 1)
			first_cdesc = cdesc;

		queued -= sglen;
		if (!queued)
			break;
	}

send_command:
	/* Setup the context options */
	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
				 crypto_ahash_blocksize(ahash));

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->state_sz);

	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, req->result_dma)) {
		ret = -EINVAL;
		goto unmap_sg;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
				   req->state_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto unmap_result;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	req->processed += len;
	request->req = &areq->base;

	*commands = n_cdesc;
	*results = 1;
	return 0;

unmap_result:
	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
			 DMA_FROM_DEVICE);
unmap_sg:
	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (req->cache_dma) {
		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
				 DMA_TO_DEVICE);
		req->cache_sz = 0;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}
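
/*
 * Return true when the context record no longer matches the state of this
 * request (intermediate digest or processed block count), in which case the
 * engine context must be invalidated before it can be reused.
 */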
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);
	int i;

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
			return true;

	if (ctx->base.ctxr->data[state_w_sz] !=
	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
		return true;

	return false;
}
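
/*
 * Completion handler for a context invalidation request: on tfm exit the
 * context record is freed, otherwise the original request is re-queued on a
 * freshly selected ring so it can be processed with a clean context.
 */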
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: invalidate: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		*ret = -EINVAL;
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return 1;
}
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int err;

	BUG_ON(priv->version == EIP97 && req->needs_inv);

	if (req->needs_inv) {
		req->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	int ret;

	ret = safexcel_invalidate_cache(async, ctx->priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}

static int safexcel_ahash_send(struct crypto_async_request *async,
			       int ring, struct safexcel_request *request,
			       int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	if (req->needs_inv)
		ret = safexcel_ahash_send_inv(async, ring, request,
					      commands, results);
	else
		ret = safexcel_ahash_send_req(async, ring, request,
					      commands, results);

	return ret;
}
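
/*
 * Queue an invalidation request for this tfm's context record and wait for
 * its completion; used on the EIP197 exit path so the engine no longer
 * references the context record before its memory is freed.
 */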
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	rctx->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}
/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, i.e. when there is at least 1 block size in the pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	int queued, cache_len;

	/* cache_len: everything accepted by the driver but not sent yet,
	 * tot sz handled by update() - last req sz - tot sz handled by send()
	 */
	cache_len = req->len - areq->nbytes - req->processed;

	/* queued: everything accepted by the driver which will be handled by
	 * the next send() calls.
	 * tot sz handled by update() - tot sz handled by send()
	 */
	queued = req->len - req->processed;

	/*
	 * In case there aren't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return areq->nbytes;
	}

	/* We couldn't cache all the data */
	return -E2BIG;
}
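
/*
 * Hand a request over to a ring worker. On EIP197 this also decides whether
 * the engine context must be invalidated first, and on first use it
 * allocates a context record from the DMA pool.
 */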
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (ctx->base.ctxr) {
		if (priv->version == EIP197 &&
		    !ctx->base.needs_inv && req->processed &&
		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
			/* We're still setting needs_inv here, even though it is
			 * cleared right away, because the needs_inv flag can be
			 * set in other functions and we want to keep the same
			 * logic.
			 */
			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}
static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	req->len += areq->nbytes;

	safexcel_ahash_cache(areq);

	/*
	 * We're not doing partial updates when performing an hmac request.
	 * Everything will be handled by the final() call.
	 */
	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
		return 0;

	if (req->hmac)
		return safexcel_ahash_enqueue(areq);

	if (!req->last_req &&
	    req->len - req->processed > crypto_ahash_blocksize(ahash))
		return safexcel_ahash_enqueue(areq);

	return 0;
}
static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->last_req = true;
	req->finish = true;

	/* If we have an overall 0 length request */
	if (!(req->len + areq->nbytes)) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);

		return 0;
	}

	return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->last_req = true;
	req->finish = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
}
static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	export->digest = req->digest;

	memcpy(export->state, req->state, req->state_sz);
	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

	return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	req->digest = export->digest;

	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}
static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_ahash_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));

	return 0;
}
static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA1_H0;
	req->state[1] = SHA1_H1;
	req->state[2] = SHA1_H2;
	req->state[3] = SHA1_H3;
	req->state[4] = SHA1_H4;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	if (priv->version == EIP197) {
		ret = safexcel_ahash_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}
struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha1_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
	struct completion completion;
	int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
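
/*
 * Derive the HMAC inner and outer pad blocks from the key: a key longer than
 * a block is first hashed down to its digest, then both pads are built by
 * XORing the (zero padded) key with HMAC_IPAD_VALUE and HMAC_OPAD_VALUE.
 */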
static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Avoid leaking */
		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
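
/*
 * Run a single pad block through the underlying ahash and export the
 * resulting intermediate state; this yields the precomputed ipad/opad digest
 * that is later loaded into the engine's context record.
 */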
static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}
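
/*
 * Precompute the HMAC istate/ostate for a key: allocate an ahash of the
 * named algorithm, build the ipad/opad blocks and export the partial hash
 * of each block.
 */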
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
				unsigned int keylen, void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen, const char *alg,
				    unsigned int state_sz)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_ahash_export_state istate, ostate;
	int ret, i;

	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	if (priv->version == EIP197 && ctx->base.ctxr) {
		for (i = 0; i < state_sz / sizeof(u32); i++) {
			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	memcpy(ctx->ipad, &istate.state, state_sz);
	memcpy(ctx->opad, &ostate.state, state_sz);

	return 0;
}
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
					SHA1_DIGEST_SIZE);
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA256_H0;
	req->state[1] = SHA256_H1;
	req->state[2] = SHA256_H2;
	req->state[3] = SHA256_H3;
	req->state[4] = SHA256_H4;
	req->state[5] = SHA256_H5;
	req->state[6] = SHA256_H6;
	req->state[7] = SHA256_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA224_H0;
	req->state[1] = SHA224_H1;
	req->state[2] = SHA224_H2;
	req->state[3] = SHA224_H3;
	req->state[4] = SHA224_H4;
	req->state[5] = SHA224_H5;
	req->state[6] = SHA224_H6;
	req->state[7] = SHA224_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha224_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha224_digest,
		.setkey = safexcel_hmac_sha224_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "safexcel-hmac-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha256_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha256_digest,
		.setkey = safexcel_hmac_sha256_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "safexcel-hmac-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};