@@ -683,7 +683,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
 		if (srclen <= dstlen)
 			break;
 		less = min_t(unsigned int, sg_dma_len(dst) - offset -
-			     dstskip, CHCR_DST_SG_SIZE);
+			     dstskip, CHCR_DST_SG_SIZE);
 		dstlen += less;
 		offset += less;
 		if (offset == sg_dma_len(dst)) {
@@ -694,7 +694,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
 			dstskip = 0;
 		}
 		src = sg_next(src);
-		srcskip = 0;
+		srcskip = 0;
 	}
 	return min(srclen, dstlen);
 }
@@ -1142,10 +1142,10 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
 					  SPACE_LEFT(ablkctx->enckey_len),
 					  reqctx->src_ofst, reqctx->dst_ofst);
-		if ((bytes + reqctx->processed) >= req->nbytes)
-			bytes = req->nbytes - reqctx->processed;
-		else
-			bytes = ROUND_16(bytes);
+		if ((bytes + reqctx->processed) >= req->nbytes)
+			bytes = req->nbytes - reqctx->processed;
+		else
+			bytes = ROUND_16(bytes);
 	} else {
 		/*CTR mode counter overfloa*/
 		bytes = req->nbytes - reqctx->processed;
@@ -1246,15 +1246,15 @@ static int process_cipher(struct ablkcipher_request *req,
 					  MIN_CIPHER_SG,
 					  SPACE_LEFT(ablkctx->enckey_len),
 					  0, 0);
-		if ((bytes + reqctx->processed) >= req->nbytes)
-			bytes = req->nbytes - reqctx->processed;
-		else
-			bytes = ROUND_16(bytes);
+		if ((bytes + reqctx->processed) >= req->nbytes)
+			bytes = req->nbytes - reqctx->processed;
+		else
+			bytes = ROUND_16(bytes);
 	} else {
 		bytes = req->nbytes;
 	}
 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
-	    CRYPTO_ALG_SUB_TYPE_CTR) {
+	    CRYPTO_ALG_SUB_TYPE_CTR) {
 		bytes = adjust_ctr_overflow(req->info, bytes);
 	}
 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
@@ -2399,10 +2399,8 @@ void chcr_add_hash_src_ent(struct ahash_request *req,
 		ulptx_walk_add_page(&ulp_walk, param->bfr_len,
 				    &reqctx->dma_addr);
 		ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
-				  0);
-//	reqctx->srcsg = ulp_walk.last_sg;
-//	reqctx->src_ofst = ulp_walk.last_sg_len;
-		ulptx_walk_end(&ulp_walk);
+				  0);
+		ulptx_walk_end(&ulp_walk);
 	}
 }
 