Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes a number of regressions in the marvell cesa driver caused
  by the chaining work, and a regression in lib/mpi that leads to a
  GFP_KERNEL allocation with preemption disabled"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: marvell - Don't copy IV vectors from the _process op for ciphers
  lib/mpi: Fix SG miter leak
  crypto: marvell - Update cache with input sg only when it is unmapped
  crypto: marvell - Don't chain at DMA level when backlog is disabled
  crypto: marvell - Fix memory leaks in TDMA chain for cipher requests
Linus Torvalds, 9 years ago
commit f38d2e5313
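
The lib/mpi item above is a classic atomic-context bug. As a minimal sketch (mine, not part of the pull): a GFP_KERNEL allocation may sleep to reclaim memory, and sleeping with preemption disabled (spinlock held, atomic kmap taken, ...) is invalid and splats on a debug kernel. Atomic sections must either use GFP_ATOMIC or allocate before entering:

	preempt_disable();              /* or spin_lock(), kmap_atomic(), ... */
	p = kmalloc(len, GFP_KERNEL);   /* WRONG: reclaim may sleep here */
	q = kmalloc(len, GFP_ATOMIC);   /* OK: never sleeps */
	preempt_enable();

The mpicoder.c hunks at the bottom show how lib/mpi ended up in exactly this state.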

+ 4 - 3
drivers/crypto/marvell/cesa.c

@@ -180,10 +180,11 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
 	struct mv_cesa_engine *engine = creq->engine;
 
 	spin_lock_bh(&engine->lock);
-	if (mv_cesa_req_get_type(creq) == CESA_DMA_REQ)
-		mv_cesa_tdma_chain(engine, creq);
-
 	ret = crypto_enqueue_request(&engine->queue, req);
+	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
+	    (ret == -EINPROGRESS ||
+	    (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		mv_cesa_tdma_chain(engine, creq);
 	spin_unlock_bh(&engine->lock);
 
 	if (ret != -EINPROGRESS)
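
The hunk above reorders enqueue and chaining so a request is only chained at the DMA level once the crypto queue has actually accepted it. The predicate it introduces, as a sketch (the helper name is mine, not the patch's):

	/* Accepted means queued (-EINPROGRESS) or backlogged (-EBUSY with
	 * CRYPTO_TFM_REQ_MAY_BACKLOG set).  A plain -EBUSY is a rejection:
	 * the caller keeps ownership of the request, so chaining it at the
	 * TDMA level would make the engine process it anyway. */
	static bool cesa_req_accepted(int ret, struct crypto_async_request *req)
	{
		return ret == -EINPROGRESS ||
		       (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG);
	}

This is the "Don't chain at DMA level when backlog is disabled" fix from the shortlog: without MAY_BACKLOG, -EBUSY is a rejection, not a deferral.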

+ 7 - 18
drivers/crypto/marvell/cipher.c

@@ -139,20 +139,11 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
 	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
 	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
 	struct mv_cesa_req *basereq = &creq->base;
-	unsigned int ivsize;
-	int ret;
 
 	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
 		return mv_cesa_ablkcipher_std_process(ablkreq, status);
 
-	ret = mv_cesa_dma_process(basereq, status);
-	if (ret)
-		return ret;
-
-	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
-	memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize);
-
-	return 0;
+	return mv_cesa_dma_process(basereq, status);
 }
 
 static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
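
The hunk above drops the IV copy-out from the _process op; per the shortlog, once requests are chained at the TDMA level, _process is no longer a safe point to read the IV back. A hedged guess at where such a copy would live instead (the destination hunk is not shown on this page; the body below is my reconstruction, not the patch's):

	static void mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
	{
		struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
		struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
		struct mv_cesa_req *basereq = &creq->base;
		unsigned int ivsize;

		if (mv_cesa_req_get_type(basereq) != CESA_DMA_REQ)
			return;

		/* Runs once the whole chain for this request has finished,
		 * so chain.last is this request's IV descriptor. */
		ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
		memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize);
	}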
@@ -320,7 +311,6 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 		      GFP_KERNEL : GFP_ATOMIC;
 	struct mv_cesa_req *basereq = &creq->base;
 	struct mv_cesa_ablkcipher_dma_iter iter;
-	struct mv_cesa_tdma_chain chain;
 	bool skip_ctx = false;
 	int ret;
 	unsigned int ivsize;
@@ -347,13 +337,13 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 			return -ENOMEM;
 	}
 
-	mv_cesa_tdma_desc_iter_init(&chain);
+	mv_cesa_tdma_desc_iter_init(&basereq->chain);
 	mv_cesa_ablkcipher_req_iter_init(&iter, req);
 
 	do {
 		struct mv_cesa_op_ctx *op;
 
-		op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
+		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
 		if (IS_ERR(op)) {
 			ret = PTR_ERR(op);
 			goto err_free_tdma;
@@ -363,18 +353,18 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 		mv_cesa_set_crypt_op_len(op, iter.base.op_len);
 
 		/* Add input transfers */
-		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
+		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
 						   &iter.src, flags);
 		if (ret)
 			goto err_free_tdma;
 
 		/* Add dummy desc to launch the crypto operation */
-		ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
+		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
 		if (ret)
 			goto err_free_tdma;
 
 		/* Add output transfers */
-		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
+		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
 						   &iter.dst, flags);
 		if (ret)
 			goto err_free_tdma;
@@ -383,13 +373,12 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 
 	/* Add output data for IV */
 	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
-	ret = mv_cesa_dma_add_iv_op(&chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
+	ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
 				    ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
 
 	if (ret)
 		goto err_free_tdma;
 
-	basereq->chain = chain;
 	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
 
 	return 0;
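
The remaining cipher.c hunks close the "memory leaks in TDMA chain" item. The chain used to be built in a stack-local struct mv_cesa_tdma_chain and copied into basereq->chain only on success, so the cleanup behind err_free_tdma (which presumably frees whatever is reachable from basereq) never saw descriptors allocated before a failure. Building directly in basereq->chain keeps every descriptor owned by the request, e.g. (error-path shape only; add_descriptors is a hypothetical stand-in for the mv_cesa_dma_add_* calls):

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	ret = add_descriptors(&basereq->chain, flags);
	if (ret)
		goto err_free_tdma;	/* the partial chain is reachable
					 * from basereq and can be freed */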

+ 6 - 6
drivers/crypto/marvell/hash.c

@@ -315,12 +315,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
 	for (i = 0; i < digsize / 4; i++)
 		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
 
-	if (creq->cache_ptr)
-		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
-				   creq->cache,
-				   creq->cache_ptr,
-				   ahashreq->nbytes - creq->cache_ptr);
-
 	if (creq->last_req) {
 		/*
 		 * Hardware's MD5 digest is in little endian format, but
@@ -365,6 +359,12 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
 		mv_cesa_ahash_last_cleanup(ahashreq);
 
 	mv_cesa_ahash_cleanup(ahashreq);
+
+	if (creq->cache_ptr)
+		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
+				   creq->cache,
+				   creq->cache_ptr,
+				   ahashreq->nbytes - creq->cache_ptr);
 }
 
 static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
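
The hash.c move defers the cache update until mv_cesa_ahash_cleanup() has run, i.e. until the input scatterlist has been DMA-unmapped, matching "Update cache with input sg only when it is unmapped" from the shortlog. The underlying ownership rule, as a sketch (only the DMA API and sg_pcopy_to_buffer() calls are real; the rest is illustrative):

	dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);   /* device owns sg */
	kick_engine();                               /* device reads sg */
	/* CPU accesses to sg here violate the DMA API contract */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE); /* CPU owns sg again */
	sg_pcopy_to_buffer(sg, nents, cache, len, skip); /* safe now */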

+ 7 - 7
lib/mpi/mpicoder.c

@@ -363,6 +363,9 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 		lzeros = 0;
 	}
 
+	miter.consumed = lzeros;
+	sg_miter_stop(&miter);
+
 	nbytes -= lzeros;
 	nbits = nbytes * 8;
 	if (nbits > MAX_EXTERN_MPI_BITS) {
@@ -390,7 +393,10 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 	z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
 	z %= BYTES_PER_MPI_LIMB;
 
-	for (;;) {
+	while (sg_miter_next(&miter)) {
+		buff = miter.addr;
+		len = miter.length;
+
 		for (x = 0; x < len; x++) {
 			a <<= 8;
 			a |= *buff++;
@@ -400,12 +406,6 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 			}
 		}
 		z += x;
-
-		if (!sg_miter_next(&miter))
-			break;
-
-		buff = miter.addr;
-		len = miter.length;
 	}
 
 	return val;
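
These hunks restore the sg_miter usage contract and fix the preemption-disabled allocation from the pull message: the miter mapping here is atomic (SG_MITER_ATOMIC, which is what the "preemption disabled" symptom implies), so every mapping taken by sg_miter_next() disables preemption until sg_miter_stop(), and miter.consumed tells the iterator where to resume. The resulting shape, as a sketch (the scan helpers are illustrative):

	sg_miter_start(&miter, sgl, sg_nents(sgl),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_next(&miter);                  /* atomic kmap: no sleeping */
	lzeros = count_zeros(miter.addr, miter.length);
	miter.consumed = lzeros;                /* resume after the zeros */
	sg_miter_stop(&miter);                  /* kunmap: sleeping legal again */

	val = alloc_mpi(nbytes - lzeros);       /* the GFP_KERNEL allocation
						 * that used to run atomically */
	while (sg_miter_next(&miter))           /* walk the remaining bytes */
		consume(miter.addr, miter.length);
	/* a false return from sg_miter_next() already stopped the mapping */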