|
@@ -201,22 +201,34 @@ static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
|
|
|
memcpy(priv->result, req->result,
|
|
|
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
|
|
|
|
|
|
+ /* Restore the original crypto request. */
|
|
|
+ req->result = priv->result;
|
|
|
+ req->base.complete = priv->complete;
|
|
|
+ req->base.data = priv->data;
|
|
|
+ req->priv = NULL;
|
|
|
+
|
|
|
+ /* Free the req->priv.priv from the ADJUSTED request. */
|
|
|
kzfree(priv);
|
|
|
}
|
|
|
|
|
|
static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
|
|
|
{
|
|
|
struct ahash_request *areq = req->data;
|
|
|
- struct ahash_request_priv *priv = areq->priv;
|
|
|
- crypto_completion_t complete = priv->complete;
|
|
|
- void *data = priv->data;
|
|
|
|
|
|
- ahash_op_unaligned_finish(areq, err);
|
|
|
+ /*
|
|
|
+ * Restore the original request, see ahash_op_unaligned() for what
|
|
|
+ * goes where.
|
|
|
+ *
|
|
|
+ * The "struct ahash_request *req" here is in fact the "req.base"
|
|
|
+ * from the ADJUSTED request from ahash_op_unaligned(), thus as it
|
|
|
+ * is a pointer to self, it is also the ADJUSTED "req".
|
|
|
+ */
|
|
|
|
|
|
- areq->base.complete = complete;
|
|
|
- areq->base.data = data;
|
|
|
+ /* First copy areq->result into areq->priv.result */
|
|
|
+ ahash_op_unaligned_finish(areq, err);
|
|
|
|
|
|
- complete(&areq->base, err);
|
|
|
+ /* Complete the ORIGINAL request. */
|
|
|
+ areq->base.complete(&areq->base, err);
|
|
|
}
|
|
|
|
|
|
static int ahash_op_unaligned(struct ahash_request *req,
|
|
@@ -234,9 +246,39 @@ static int ahash_op_unaligned(struct ahash_request *req,
|
|
|
if (!priv)
|
|
|
return -ENOMEM;
|
|
|
|
|
|
+ /*
|
|
|
+ * WARNING: Voodoo programming below!
|
|
|
+ *
|
|
|
+ * The code below is obscure and hard to understand, thus explanation
|
|
|
+ * is necessary. See include/crypto/hash.h and include/linux/crypto.h
|
|
|
+ * to understand the layout of structures used here!
|
|
|
+ *
|
|
|
+ * The code here will replace portions of the ORIGINAL request with
|
|
|
+ * pointers to new code and buffers so the hashing operation can store
|
|
|
+ * the result in aligned buffer. We will call the modified request
|
|
|
+ * an ADJUSTED request.
|
|
|
+ *
|
|
|
+ * The newly mangled request will look as such:
|
|
|
+ *
|
|
|
+ * req {
|
|
|
+ * .result = ADJUSTED[new aligned buffer]
|
|
|
+ * .base.complete = ADJUSTED[pointer to completion function]
|
|
|
+ * .base.data = ADJUSTED[*req (pointer to self)]
|
|
|
+ * .priv = ADJUSTED[new priv] {
|
|
|
+ * .result = ORIGINAL(result)
|
|
|
+ * .complete = ORIGINAL(base.complete)
|
|
|
+ * .data = ORIGINAL(base.data)
|
|
|
+ * }
|
|
|
+ */
|
|
|
+
|
|
|
priv->result = req->result;
|
|
|
priv->complete = req->base.complete;
|
|
|
priv->data = req->base.data;
|
|
|
+ /*
|
|
|
+ * WARNING: We do not backup req->priv here! The req->priv
|
|
|
+ * is for internal use of the Crypto API and the
|
|
|
+ * user must _NOT_ _EVER_ depend on its content!
|
|
|
+ */
|
|
|
|
|
|
req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
|
|
|
req->base.complete = ahash_op_unaligned_done;
|