Browse Source

crypto: inside-secure - do not access buffers mapped to the device

This patch updates the way the digest is copied from the state buffer
to the result buffer, so that the copy only happens after the state
buffer has been DMA unmapped, as otherwise the buffer would be owned by
the device.

Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Antoine Tenart 7 years ago
parent
commit
b89a815975
1 changed file with 4 additions and 4 deletions
  1. 4 4
      drivers/crypto/inside-secure/safexcel_hash.c

+ 4 - 4
drivers/crypto/inside-secure/safexcel_hash.c

@@ -156,10 +156,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	safexcel_complete(priv, ring);
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
-	if (sreq->finish)
-		memcpy(areq->result, sreq->state,
-		       crypto_ahash_digestsize(ahash));
-
 	if (sreq->nents) {
 		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
 		sreq->nents = 0;
@@ -177,6 +173,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 		sreq->cache_dma = 0;
 	}
 
+	if (sreq->finish)
+		memcpy(areq->result, sreq->state,
+		       crypto_ahash_digestsize(ahash));
+
 	cache_len = sreq->len - sreq->processed;
 	if (cache_len)
 		memcpy(sreq->cache, sreq->cache_next, cache_len);