safexcel_hash.c

/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;
	u32 digest;

	u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
	bool last_req;
	bool finish;
	bool hmac;

	u8 state_sz;	/* expected state size, only set once */
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];

	u64 len;
	u64 processed;

	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct safexcel_ahash_export_state {
	u64 len;
	u64 processed;

	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
	u8 cache[SHA256_BLOCK_SIZE];
};
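
/*
 * Build the two-instruction token for a hash operation: the first
 * instruction hashes the input data, the second inserts the resulting
 * digest into the output.
 */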
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
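
/*
 * Fill in the control words of the first command descriptor: hash
 * algorithm, digest type and context size, plus the restart/no-finish
 * flags. For a continued hash the intermediate state and the block
 * counter are copied into the context record; for HMAC the precomputed
 * ipad/opad digests are copied instead.
 */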
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize,
				     unsigned int blocksize)
{
	int i;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= ctx->digest;

	if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (req->processed) {
			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
		} else {
			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
		}

		if (!req->finish)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

		/*
		 * Copy the input digest if needed, and set up the context
		 * fields. Do this now as we need it to set up the first
		 * command descriptor.
		 */
		if (req->processed) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			if (req->finish)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
		}
	} else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

		memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
		memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
		       ctx->opad, digestsize);
	}
}
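
/*
 * Completion handler for a hash request: pop and check the result
 * descriptor, copy the digest (or intermediate state) from the result
 * buffer back into the request, unmap the source scatterlist and keep
 * any bytes cached for the next update.
 */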
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	int cache_len, result_sz = sreq->state_sz;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		*ret = -EINVAL;
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (sreq->finish)
		result_sz = crypto_ahash_digestsize(ahash);
	memcpy(sreq->state, areq->result, result_sz);

	dma_unmap_sg(priv->dev, areq->src,
		     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);

	safexcel_free_context(priv, async, sreq->state_sz);

	cache_len = sreq->len - sreq->processed;
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}
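
/*
 * Build the descriptors for a hash request: an optional command
 * descriptor for data cached by previous updates, one command descriptor
 * per source scatterlist entry, and a single result descriptor that
 * receives the digest or intermediate state.
 */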
static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
			       struct safexcel_request *request, int *commands,
			       int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

	queued = len = req->len - req->processed;
	if (queued < crypto_ahash_blocksize(ahash))
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	/*
	 * If this is not the last request and the queued data does not fit
	 * into full blocks, cache it for the next send() call.
	 */
	extra = queued & (crypto_ahash_blocksize(ahash) - 1);
	if (!req->last_req && extra) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache_next, extra, areq->nbytes - extra);

		queued -= extra;
		len -= extra;
	}

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
		if (!ctx->base.cache) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(ctx->base.cache, req->cache, cache_len);
		ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
						     cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
			ret = -EINVAL;
			goto free_cache;
		}

		ctx->base.cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 ctx->base.cache_dma,
						 cache_len, len,
						 ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}
		n_cdesc++;

		queued -= cache_len;
		if (!queued)
			goto send_command;
	}

	/* Now handle the current ahash request buffer(s) */
	nents = dma_map_sg(priv->dev, areq->src,
			   sg_nents_for_len(areq->src, areq->nbytes),
			   DMA_TO_DEVICE);
	if (!nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - sglen < 0)
			sglen = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen), sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1)
			first_cdesc = cdesc;

		queued -= sglen;
		if (!queued)
			break;
	}

send_command:
	/* Setup the context options */
	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
				 crypto_ahash_blocksize(ahash));

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->state_sz);

	ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
					      req->state_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
		ret = -EINVAL;
		goto cdesc_rollback;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
				   req->state_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	req->processed += len;
	request->req = &areq->base;
	ctx->base.handle_result = safexcel_handle_result;

	*commands = n_cdesc;
	*results = 1;

	return 0;

cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (ctx->base.cache_dma) {
		dma_unmap_single(priv->dev, ctx->base.cache_dma,
				 ctx->base.cache_sz, DMA_TO_DEVICE);
		ctx->base.cache_sz = 0;
	}
free_cache:
	if (ctx->base.cache) {
		kfree(ctx->base.cache);
		ctx->base.cache = NULL;
	}

unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}
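
/*
 * Return true when the state stored in the context record no longer
 * matches this request (digest words or block count differ), meaning the
 * record must be invalidated before it can be reused.
 */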
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);
	int i;

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
			return true;

	if (ctx->base.ctxr->data[state_w_sz] !=
	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
		return true;

	return false;
}

static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: invalidate: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		*ret = -EINVAL;
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;
	ctx->base.needs_inv = false;
	ctx->base.send = safexcel_ahash_send;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	*should_complete = false;

	return 1;
}

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	int ret;

	ctx->base.handle_result = safexcel_handle_inv_result;
	ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}
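
/*
 * Send a dummy invalidation request when the transform is destroyed and
 * wait for its completion; the context record itself is freed from the
 * invalidation result handler.
 */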
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct ahash_request req;
	struct safexcel_inv_result result = { 0 };
	int ring = ctx->base.ring;

	memset(&req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req.base.tfm);
	ctx->base.exit_inv = true;
	ctx->base.send = safexcel_ahash_send_inv;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	wait_for_completion_interruptible(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}
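
/*
 * Buffer the new input in the request cache as long as the total pending
 * data still fits within a single block; return -E2BIG when it does not
 * and the data has to be sent to the engine.
 */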
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	int queued, cache_len;

	cache_len = req->len - areq->nbytes - req->processed;
	queued = req->len - req->processed;

	/*
	 * In case there aren't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return areq->nbytes;
	}

	/* We couldn't cache all the data */
	return -E2BIG;
}

static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	ctx->base.send = safexcel_ahash_send;

	if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
		ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

	if (ctx->base.ctxr) {
		if (ctx->base.needs_inv)
			ctx->base.send = safexcel_ahash_send_inv;
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	req->len += areq->nbytes;

	safexcel_ahash_cache(areq);

	/*
	 * We're not doing partial updates when performing an hmac request.
	 * Everything will be handled by the final() call.
	 */
	if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
		return 0;

	if (req->hmac)
		return safexcel_ahash_enqueue(areq);

	if (!req->last_req &&
	    req->len - req->processed > crypto_ahash_blocksize(ahash))
		return safexcel_ahash_enqueue(areq);

	return 0;
}

static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->last_req = true;
	req->finish = true;

	/* If we have an overall 0 length request */
	if (!(req->len + areq->nbytes)) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);

		return 0;
	}

	return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->last_req = true;
	req->finish = true;

	safexcel_ahash_update(areq);

	return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	memcpy(export->state, req->state, req->state_sz);
	memset(export->cache, 0, crypto_ahash_blocksize(ahash));
	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

	return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));

	return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA1_H0;
	req->state[1] = SHA1_H1;
	req->state[2] = SHA1_H2;
	req->state[3] = SHA1_H3;
	req->state[4] = SHA1_H4;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	ret = safexcel_ahash_exit_inv(tfm);
	if (ret)
		dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
}

struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	safexcel_sha1_init(areq);
	ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
	struct completion completion;
	int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
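
/*
 * Derive the HMAC inner and outer pads from the key: a key longer than
 * one block is first hashed down to the digest size, then both pads are
 * zero-padded to the block size and XORed with HMAC_IPAD_VALUE and
 * HMAC_OPAD_VALUE respectively.
 */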
static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Avoid leaking */
		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
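
/*
 * Hash a single block-sized pad and export the resulting intermediate
 * state; it is later programmed into the context record as the
 * precomputed ipad/opad digest.
 */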
static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}
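
/*
 * Precompute the HMAC inner and outer hash states for the given key by
 * hashing the ipad and opad blocks with the named ahash transform.
 */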
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
				unsigned int keylen, void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_ahash_export_state istate, ostate;
	int ret, i;

	ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
		    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
			ctx->base.needs_inv = true;
			break;
		}
	}

	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

	return 0;
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA256_H0;
	req->state[1] = SHA256_H1;
	req->state[2] = SHA256_H2;
	req->state[3] = SHA256_H3;
	req->state[4] = SHA256_H4;
	req->state[5] = SHA256_H5;
	req->state[6] = SHA256_H6;
	req->state[7] = SHA256_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA224_H0;
	req->state[1] = SHA224_H1;
	req->state[2] = SHA224_H2;
	req->state[3] = SHA224_H3;
	req->state[4] = SHA224_H4;
	req->state[5] = SHA224_H5;
	req->state[6] = SHA224_H6;
	req->state[7] = SHA224_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};