safexcel_hash.c

/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        u32 alg;
        u32 digest;

        u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
        u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
        bool last_req;
        bool finish;
        bool hmac;
        bool needs_inv;

        int nents;
        dma_addr_t result_dma;

        u8 state_sz;    /* expected state size, only set once */
        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

        u64 len;
        u64 processed;

        u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
        dma_addr_t cache_dma;
        unsigned int cache_sz;

        u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};
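
/*
 * Note on the bookkeeping above: req->len counts every byte accepted through
 * ->update(), req->processed counts the bytes already handed to the engine,
 * and the difference lives in the software cache until at least a full block
 * is available. cache_next receives the tail of the current request in
 * send() so it can be copied back into cache once the result comes back.
 */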

struct safexcel_ahash_export_state {
        u64 len;
        u64 processed;

        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
        u8 cache[SHA256_BLOCK_SIZE];
};

static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
                                u32 input_length, u32 result_length)
{
        struct safexcel_token *token =
                (struct safexcel_token *)cdesc->control_data.token;

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = input_length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
        token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

        token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
        token[1].packet_length = result_length;
        token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
                        EIP197_TOKEN_STAT_LAST_PACKET;
        token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
                                EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
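
/*
 * The token program above is two instructions: hash input_length bytes of
 * incoming data, then insert result_length bytes of digest into the output.
 * Both carry LAST_HASH, and the insert also carries LAST_PACKET, presumably
 * because a hash request is always submitted as a single packet here.
 */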

static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                                     struct safexcel_ahash_req *req,
                                     struct safexcel_command_desc *cdesc,
                                     unsigned int digestsize,
                                     unsigned int blocksize)
{
        int i;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
        cdesc->control_data.control0 |= ctx->alg;
        cdesc->control_data.control0 |= ctx->digest;

        if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
                if (req->processed) {
                        if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
                        else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
                                 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

                        cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
                } else {
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
                }

                if (!req->finish)
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

                /*
                 * Copy the input digest if needed, and setup the context
                 * fields. Do this now as we need it to setup the first command
                 * descriptor.
                 */
                if (req->processed) {
                        for (i = 0; i < digestsize / sizeof(u32); i++)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

                        if (req->finish)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
                }
        } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

                memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
                memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
                       ctx->opad, digestsize);
        }
}
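
/*
 * CONTEXT_CONTROL_SIZE() is the number of 32-bit context words the engine
 * should load: presumably 5 SHA-1 state words plus the digest count word
 * (6), 8 SHA-224/256 state words plus the count (9), and the two
 * precomputed digests for HMAC-SHA1 (10 = 2 * 5 words).
 */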

static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
        int cache_len;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: result: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: result: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (sreq->finish)
                memcpy(areq->result, sreq->state,
                       crypto_ahash_digestsize(ahash));

        if (sreq->nents) {
                dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
                sreq->nents = 0;
        }

        if (sreq->result_dma) {
                dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
                                 DMA_FROM_DEVICE);
                sreq->result_dma = 0;
        }

        if (sreq->cache_dma) {
                dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
                                 DMA_TO_DEVICE);
                sreq->cache_dma = 0;
        }

        cache_len = sreq->len - sreq->processed;
        if (cache_len)
                memcpy(sreq->cache, sreq->cache_next, cache_len);

        *should_complete = true;

        return 1;
}
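
/*
 * On completion the descriptor error code is checked, all DMA mappings
 * (source scatterlist, result state, cached block) are torn down, the
 * digest is copied out only when ->final()/->finup() asked for it, and
 * whatever was stashed in cache_next during send() becomes the cache for
 * the next round.
 */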

static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
                                   struct safexcel_request *request,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

        queued = len = req->len - req->processed;
        if (queued <= crypto_ahash_blocksize(ahash))
                cache_len = queued;
        else
                cache_len = queued - areq->nbytes;

        if (!req->last_req) {
                /* If this is not the last request and the queued data does not
                 * fit into full blocks, cache it for the next send() call.
                 */
                extra = queued & (crypto_ahash_blocksize(ahash) - 1);
                if (!extra)
                        /* If this is not the last request and the queued data
                         * is a multiple of a block, cache the last one for now.
                         */
                        extra = crypto_ahash_blocksize(ahash);

                if (extra) {
                        sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                           req->cache_next, extra,
                                           areq->nbytes - extra);

                        queued -= extra;
                        len -= extra;

                        if (!queued) {
                                *commands = 0;
                                *results = 0;
                                return 0;
                        }
                }
        }

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* Add a command descriptor for the cached data, if any */
        if (cache_len) {
                req->cache_dma = dma_map_single(priv->dev, req->cache,
                                                cache_len, DMA_TO_DEVICE);
                if (dma_mapping_error(priv->dev, req->cache_dma)) {
                        spin_unlock_bh(&priv->ring[ring].egress_lock);
                        return -EINVAL;
                }

                req->cache_sz = cache_len;
                first_cdesc = safexcel_add_cdesc(priv, ring, 1,
                                                 (cache_len == len),
                                                 req->cache_dma, cache_len, len,
                                                 ctx->base.ctxr_dma);
                if (IS_ERR(first_cdesc)) {
                        ret = PTR_ERR(first_cdesc);
                        goto unmap_cache;
                }
                n_cdesc++;

                queued -= cache_len;
                if (!queued)
                        goto send_command;
        }

        /* Now handle the current ahash request buffer(s) */
        req->nents = dma_map_sg(priv->dev, areq->src,
                                sg_nents_for_len(areq->src, areq->nbytes),
                                DMA_TO_DEVICE);
        if (!req->nents) {
                ret = -ENOMEM;
                goto cdesc_rollback;
        }

        for_each_sg(areq->src, sg, req->nents, i) {
                int sglen = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - sglen < 0)
                        sglen = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
                                           !(queued - sglen), sg_dma_address(sg),
                                           sglen, len, ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                if (n_cdesc == 1)
                        first_cdesc = cdesc;

                queued -= sglen;
                if (!queued)
                        break;
        }

send_command:
        /* Setup the context options */
        safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
                                 crypto_ahash_blocksize(ahash));

        /* Add the token */
        safexcel_hash_token(first_cdesc, len, req->state_sz);

        req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
                                         DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, req->result_dma)) {
                ret = -EINVAL;
                goto cdesc_rollback;
        }

        /* Add a result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
                                   req->state_sz);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto unmap_result;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        req->processed += len;
        request->req = &areq->base;

        *commands = n_cdesc;
        *results = 1;
        return 0;

unmap_result:
        dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
        if (req->cache_dma) {
                dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
                                 DMA_TO_DEVICE);
                req->cache_sz = 0;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);
        return ret;
}
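
/*
 * Illustration of the tail-caching logic above (example numbers only): with
 * a 64-byte block size, req->last_req == false and queued == 160,
 * extra = 160 & 63 = 32, so the last 32 bytes of the source are copied into
 * cache_next and only 128 bytes are sent. If queued were exactly 128, extra
 * would be forced to a full block (64), presumably so that a later final()
 * always has data left to pad and finish.
 */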

static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        unsigned int state_w_sz = req->state_sz / sizeof(u32);
        int i;

        for (i = 0; i < state_w_sz; i++)
                if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
                        return true;

        if (ctx->base.ctxr->data[state_w_sz] !=
            cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
                return true;

        return false;
}
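
/*
 * The check above compares the hardware context record with the software
 * copy of the hash state and block counter; any mismatch means the context
 * is stale (e.g. it was reused for another request) and must be invalidated
 * before this request can continue on it.
 */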

static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
        int enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: invalidate: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: invalidate: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;
                return 1;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        *should_complete = false;

        return 1;
}
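
/*
 * Two outcomes once an invalidation completes: if the tfm is being torn
 * down (exit_inv), the context record is freed and the waiter is completed;
 * otherwise the original request is re-queued on a freshly selected ring
 * and the ring worker is kicked, so *should_complete stays false.
 */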

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int err;

        BUG_ON(priv->version == EIP97 && req->needs_inv);

        if (req->needs_inv) {
                req->needs_inv = false;
                err = safexcel_handle_inv_result(priv, ring, async,
                                                 should_complete, ret);
        } else {
                err = safexcel_handle_req_result(priv, ring, async,
                                                 should_complete, ret);
        }

        return err;
}

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
                                   int ring, struct safexcel_request *request,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        int ret;

        ret = safexcel_invalidate_cache(async, ctx->priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}

static int safexcel_ahash_send(struct crypto_async_request *async,
                               int ring, struct safexcel_request *request,
                               int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int ret;

        if (req->needs_inv)
                ret = safexcel_ahash_send_inv(async, ring, request,
                                              commands, results);
        else
                ret = safexcel_ahash_send_req(async, ring, request,
                                              commands, results);

        return ret;
}

static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
        struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(req, 0, sizeof(struct ahash_request));

        /* create invalidation request */
        init_completion(&result.completion);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_inv_complete, &result);

        ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
        ctx = crypto_tfm_ctx(req->base.tfm);
        ctx->base.exit_inv = true;
        rctx->needs_inv = true;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        wait_for_completion(&result.completion);

        if (result.error) {
                dev_warn(priv->dev, "hash: completion error (%d)\n",
                         result.error);
                return result.error;
        }

        return 0;
}
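
/*
 * exit_inv builds a dummy request on the stack whose only purpose is to
 * carry needs_inv/exit_inv through the normal send path, then blocks on a
 * completion until safexcel_handle_inv_result() has run and the context
 * record has been freed.
 */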

/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, aka. when there is at least 1 block size in the pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        int queued, cache_len;

        /* cache_len: everything accepted by the driver but not sent yet,
         * tot sz handled by update() - last req sz - tot sz handled by send()
         */
        cache_len = req->len - areq->nbytes - req->processed;

        /* queued: everything accepted by the driver which will be handled by
         * the next send() calls.
         * tot sz handled by update() - tot sz handled by send()
         */
        queued = req->len - req->processed;

        /*
         * In case there aren't enough bytes to proceed (less than a
         * block size), cache the data until we have enough.
         */
        if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache + cache_len,
                                   areq->nbytes, 0);
                return areq->nbytes;
        }

        /* We couldn't cache all the data */
        return -E2BIG;
}
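
/*
 * Example of the bookkeeping (illustrative numbers only): with a 64-byte
 * block size, 20 bytes already cached (req->len == 20, req->processed == 0)
 * and an update() of 30 bytes (req->len is bumped to 50 before calling in),
 * cache_len = 50 - 30 - 0 = 20 and 20 + 30 <= 64, so the 30 new bytes are
 * appended at cache + 20 and the call returns 30. When the total would
 * exceed one block, nothing is cached here and -E2BIG is returned; the data
 * is then consumed directly from the scatterlist by send().
 */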

static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        req->needs_inv = false;

        if (ctx->base.ctxr) {
                if (priv->version == EIP197 &&
                    !ctx->base.needs_inv && req->processed &&
                    ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
                        /* We're still setting needs_inv here, even though it is
                         * cleared right away, because the needs_inv flag can be
                         * set in other functions and we want to keep the same
                         * logic.
                         */
                        ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

                if (ctx->base.needs_inv) {
                        ctx->base.needs_inv = false;
                        req->needs_inv = true;
                }
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(areq->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

        /* If the request is 0 length, do nothing */
        if (!areq->nbytes)
                return 0;

        req->len += areq->nbytes;

        safexcel_ahash_cache(areq);

        /*
         * We're not doing partial updates when performing an hmac request.
         * Everything will be handled by the final() call.
         */
        if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
                return 0;

        if (req->hmac)
                return safexcel_ahash_enqueue(areq);

        if (!req->last_req &&
            req->len - req->processed > crypto_ahash_blocksize(ahash))
                return safexcel_ahash_enqueue(areq);

        return 0;
}
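
/*
 * update() therefore only pushes work to the engine once more than one
 * block is pending (presumably so final() always has something left to
 * hash), and it defers HMAC requests entirely: the precomputed ipad/opad
 * context is only applied when the whole message is known at
 * final()/digest() time. The req->hmac case is the internal path used
 * while precomputing those pads in safexcel_hmac_init_iv().
 */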

static int safexcel_ahash_final(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        req->last_req = true;
        req->finish = true;

        /* If we have an overall 0 length request */
        if (!(req->len + areq->nbytes)) {
                if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                        memcpy(areq->result, sha1_zero_message_hash,
                               SHA1_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
                        memcpy(areq->result, sha224_zero_message_hash,
                               SHA224_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                        memcpy(areq->result, sha256_zero_message_hash,
                               SHA256_DIGEST_SIZE);

                return 0;
        }

        return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        req->last_req = true;
        req->finish = true;

        safexcel_ahash_update(areq);
        return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_export_state *export = out;

        export->len = req->len;
        export->processed = req->processed;

        memcpy(export->state, req->state, req->state_sz);
        memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

        return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        const struct safexcel_ahash_export_state *export = in;
        int ret;

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req->len = export->len;
        req->processed = export->processed;

        memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
        memcpy(req->state, export->state, req->state_sz);

        return 0;
}
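
/*
 * export()/import() serialize the whole software view of a partial hash
 * (lengths, intermediate state words and the cached tail) into
 * struct safexcel_ahash_export_state, which is also what halg.statesize
 * advertises below, so a request can be suspended and resumed later.
 */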

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(__crypto_ahash_alg(tfm->__crt_alg),
                             struct safexcel_alg_template, alg.ahash);

        ctx->priv = tmpl->priv;
        ctx->base.send = safexcel_ahash_send;
        ctx->base.handle_result = safexcel_handle_result;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct safexcel_ahash_req));
        return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA1_H0;
        req->state[1] = SHA1_H1;
        req->state[2] = SHA1_H2;
        req->state[3] = SHA1_H3;
        req->state[4] = SHA1_H4;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA1_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        if (priv->version == EIP197) {
                ret = safexcel_ahash_exit_inv(tfm);
                if (ret)
                        dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
        } else {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);
        }
}

struct safexcel_alg_template safexcel_alg_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha1_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "safexcel-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        safexcel_sha1_init(areq);
        ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
        return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_hmac_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
        struct completion completion;
        int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

static int safexcel_hmac_init_pad(struct ahash_request *areq,
                                  unsigned int blocksize, const u8 *key,
                                  unsigned int keylen, u8 *ipad, u8 *opad)
{
        struct safexcel_ahash_result result;
        struct scatterlist sg;
        int ret, i;
        u8 *keydup;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                keydup = kmemdup(key, keylen, GFP_KERNEL);
                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           safexcel_ahash_complete, &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(areq, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(areq);
                if (ret == -EINPROGRESS || ret == -EBUSY) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Avoid leaking */
                memzero_explicit(keydup, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}
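
/*
 * This is the standard HMAC (RFC 2104) key preparation: keys longer than a
 * block are replaced by their digest, the key is zero-padded to a full
 * block, and ipad/opad are that padded key XORed with HMAC_IPAD_VALUE
 * (0x36) and HMAC_OPAD_VALUE (0x5c) from <crypto/hmac.h>.
 */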

static int safexcel_hmac_init_iv(struct ahash_request *areq,
                                 unsigned int blocksize, u8 *pad, void *state)
{
        struct safexcel_ahash_result result;
        struct safexcel_ahash_req *req;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(areq, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req = ahash_request_ctx(areq);
        req->hmac = true;
        req->last_req = true;

        ret = crypto_ahash_update(areq);
        if (ret && ret != -EINPROGRESS && ret != -EBUSY)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        return crypto_ahash_export(areq, state);
}

static int safexcel_hmac_setkey(const char *alg, const u8 *key,
                                unsigned int keylen, void *istate, void *ostate)
{
        struct ahash_request *areq;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad, *opad;
        int ret;

        tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        areq = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!areq) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);
        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_request;
        }

        opad = ipad + blocksize;

        ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
        kfree(ipad);
free_request:
        ahash_request_free(areq);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}
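
/*
 * safexcel_hmac_setkey() thus derives the HMAC key material with the
 * driver's own "safexcel-sha1" ahash: it runs one block of ipad and one
 * block of opad through safexcel_hmac_init_iv() and exports the two partial
 * states into istate/ostate. Those midstates are what ends up in
 * ctx->ipad/ctx->opad and, from there, in the context record for the HMAC
 * digest mode.
 */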

static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_ahash_export_state istate, ostate;
        int ret, i;

        ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        if (priv->version == EIP197 && ctx->base.ctxr) {
                for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
                        if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
                            ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
                                ctx->base.needs_inv = true;
                                break;
                        }
                }
        }

        memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
        memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

        return 0;
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_hmac_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_hmac_sha1_digest,
                .setkey = safexcel_hmac_sha1_setkey,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "safexcel-hmac-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA256_H0;
        req->state[1] = SHA256_H1;
        req->state[2] = SHA256_H2;
        req->state[3] = SHA256_H3;
        req->state[4] = SHA256_H4;
        req->state[5] = SHA256_H5;
        req->state[6] = SHA256_H6;
        req->state[7] = SHA256_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha256_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha256_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha256_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "safexcel-sha256",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA224_H0;
        req->state[1] = SHA224_H1;
        req->state[2] = SHA224_H2;
        req->state[3] = SHA224_H3;
        req->state[4] = SHA224_H4;
        req->state[5] = SHA224_H5;
        req->state[6] = SHA224_H6;
        req->state[7] = SHA224_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha224_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha224_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha224_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "safexcel-sha224",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};