@@ -189,17 +189,31 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	else
 		cache_len = queued - areq->nbytes;
 
-	/*
-	 * If this is not the last request and the queued data does not fit
-	 * into full blocks, cache it for the next send() call.
-	 */
-	extra = queued & (crypto_ahash_blocksize(ahash) - 1);
-	if (!req->last_req && extra) {
-		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
-				   req->cache_next, extra, areq->nbytes - extra);
+	if (!req->last_req) {
+		/* If this is not the last request and the queued data does not
+		 * fit into full blocks, cache it for the next send() call.
+		 */
+		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+		if (!extra)
+			/* If this is not the last request and the queued data
+			 * is a multiple of a block, cache the last one for now.
+			 */
+			extra = queued - crypto_ahash_blocksize(ahash);
 
-		queued -= extra;
-		len -= extra;
+		if (extra) {
+			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+					   req->cache_next, extra,
+					   areq->nbytes - extra);
+
+			queued -= extra;
+			len -= extra;
+
+			if (!queued) {
+				*commands = 0;
+				*results = 0;
+				return 0;
+			}
+		}
 	}
 
 	spin_lock_bh(&priv->ring[ring].egress_lock);
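For reference, here is a minimal standalone sketch (not driver code) mirroring the caching rule the new hunk applies: for a non-final request, the trailing bytes that do not fill a whole block are held back for the next send() call, and when the queued data is an exact multiple of the block size the block-aligned special case from the hunk is used instead. The function name and parameters below are illustrative only, not part of the driver.

#include <stdbool.h>
#include <stddef.h>

/*
 * Illustrative helper: how many trailing bytes of the queued data should be
 * cached for the next send() call, following the logic of the hunk above.
 */
static size_t bytes_to_cache(size_t queued, size_t blocksize, bool last_req)
{
	size_t extra;

	if (last_req)
		return 0;		/* final request: nothing is held back */

	/* Queued data that does not fit into full blocks. */
	extra = queued & (blocksize - 1);
	if (!extra)
		/* Queued data is a multiple of a block. */
		extra = queued - blocksize;

	return extra;
}

When everything ends up cached and nothing remains to be sent to the engine, the added "if (!queued)" early return in the hunk reports zero commands and zero results for the request instead of issuing descriptors for empty data.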