@@ -33,6 +33,8 @@ struct safexcel_ahash_req {
 	bool hmac;
 	bool needs_inv;
 
+	int nents;
+
 	u8 state_sz;    /* expected sate size, only set once */
 	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
@@ -151,8 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 		memcpy(areq->result, sreq->state,
 		       crypto_ahash_digestsize(ahash));
 
-	dma_unmap_sg(priv->dev, areq->src,
-		     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+	if (sreq->nents) {
+		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+		sreq->nents = 0;
+	}
 
 	safexcel_free_context(priv, async, sreq->state_sz);
 
@@ -177,7 +181,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
 	struct safexcel_result_desc *rdesc;
 	struct scatterlist *sg;
-	int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
 
 	queued = len = req->len - req->processed;
 	if (queued < crypto_ahash_blocksize(ahash))
@@ -233,15 +237,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	}
 
 	/* Now handle the current ahash request buffer(s) */
-	nents = dma_map_sg(priv->dev, areq->src,
-			   sg_nents_for_len(areq->src, areq->nbytes),
-			   DMA_TO_DEVICE);
-	if (!nents) {
+	req->nents = dma_map_sg(priv->dev, areq->src,
+				sg_nents_for_len(areq->src, areq->nbytes),
+				DMA_TO_DEVICE);
+	if (!req->nents) {
 		ret = -ENOMEM;
 		goto cdesc_rollback;
 	}
 
-	for_each_sg(areq->src, sg, nents, i) {
+	for_each_sg(areq->src, sg, req->nents, i) {
 		int sglen = sg_dma_len(sg);
 
 		/* Do not overflow the request */