@@ -80,10 +80,10 @@ struct sha1_mb_ctx {
 static inline struct mcryptd_hash_request_ctx
 		*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
 {
-	struct shash_desc *desc;
+	struct ahash_request *areq;
 
-	desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
-	return container_of(desc, struct mcryptd_hash_request_ctx, desc);
+	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
+	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
 }
 
 static inline struct ahash_request
@@ -93,7 +93,7 @@ static inline struct ahash_request
 }
 
 static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
-			 struct shash_desc *desc)
+			 struct ahash_request *areq)
 {
 	rctx->flag = HASH_UPDATE;
 }
@@ -375,9 +375,9 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
 	}
 }
 
-static int sha1_mb_init(struct shash_desc *desc)
+static int sha1_mb_init(struct ahash_request *areq)
 {
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
 
 	hash_ctx_init(sctx);
 	sctx->job.result_digest[0] = SHA1_H0;
@@ -395,7 +395,7 @@ static int sha1_mb_init(struct shash_desc *desc)
 static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
 {
 	int i;
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
 	__be32 *dst = (__be32 *) rctx->out;
 
 	for (i = 0; i < 5; ++i)
@@ -427,7 +427,7 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
 
 		}
 		sha_ctx = (struct sha1_hash_ctx *)
-				shash_desc_ctx(&rctx->desc);
+				ahash_request_ctx(&rctx->areq);
 		kernel_fpu_begin();
 		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
 						rctx->walk.data, nbytes, flag);
@@ -519,11 +519,10 @@ static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
 		mcryptd_arm_flusher(cstate, delay);
 }
 
-static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
-			  unsigned int len)
+static int sha1_mb_update(struct ahash_request *areq)
 {
 	struct mcryptd_hash_request_ctx *rctx =
-		container_of(desc, struct mcryptd_hash_request_ctx, desc);
+		container_of(areq, struct mcryptd_hash_request_ctx, areq);
 	struct mcryptd_alg_cstate *cstate =
 			this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
 
@@ -539,7 +538,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
 	}
 
 	/* need to init context */
-	req_ctx_init(rctx, desc);
+	req_ctx_init(rctx, areq);
 
 	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
 
@@ -552,7 +551,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
 		rctx->flag |= HASH_DONE;
 
 	/* submit */
-	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
 	sha1_mb_add_list(rctx, cstate);
 	kernel_fpu_begin();
 	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
@@ -579,11 +578,10 @@ done:
 	return ret;
 }
 
-static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
-			 unsigned int len, u8 *out)
+static int sha1_mb_finup(struct ahash_request *areq)
 {
 	struct mcryptd_hash_request_ctx *rctx =
-		container_of(desc, struct mcryptd_hash_request_ctx, desc);
+		container_of(areq, struct mcryptd_hash_request_ctx, areq);
 	struct mcryptd_alg_cstate *cstate =
 			this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
 
@@ -598,7 +596,7 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
 	}
 
 	/* need to init context */
-	req_ctx_init(rctx, desc);
+	req_ctx_init(rctx, areq);
 
 	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
 
@@ -611,11 +609,10 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
 		rctx->flag |= HASH_DONE;
 		flag = HASH_LAST;
 	}
-	rctx->out = out;
 
 	/* submit */
 	rctx->flag |= HASH_FINAL;
-	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
 	sha1_mb_add_list(rctx, cstate);
 
 	kernel_fpu_begin();
@@ -641,10 +638,10 @@ done:
 	return ret;
 }
 
-static int sha1_mb_final(struct shash_desc *desc, u8 *out)
+static int sha1_mb_final(struct ahash_request *areq)
 {
 	struct mcryptd_hash_request_ctx *rctx =
-		container_of(desc, struct mcryptd_hash_request_ctx, desc);
+		container_of(areq, struct mcryptd_hash_request_ctx, areq);
 	struct mcryptd_alg_cstate *cstate =
 			this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
 
@@ -659,12 +656,11 @@ static int sha1_mb_final(struct shash_desc *desc, u8 *out)
 	}
 
 	/* need to init context */
-	req_ctx_init(rctx, desc);
+	req_ctx_init(rctx, areq);
 
-	rctx->out = out;
 	rctx->flag |= HASH_DONE | HASH_FINAL;
 
-	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
 	/* flag HASH_FINAL and 0 data size */
 	sha1_mb_add_list(rctx, cstate);
 	kernel_fpu_begin();
@@ -691,48 +687,98 @@ done:
 	return ret;
 }
 
-static int sha1_mb_export(struct shash_desc *desc, void *out)
+static int sha1_mb_export(struct ahash_request *areq, void *out)
 {
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
 
 	memcpy(out, sctx, sizeof(*sctx));
 
 	return 0;
 }
 
-static int sha1_mb_import(struct shash_desc *desc, const void *in)
+static int sha1_mb_import(struct ahash_request *areq, const void *in)
 {
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
 
 	memcpy(sctx, in, sizeof(*sctx));
 
 	return 0;
 }
 
-static struct shash_alg sha1_mb_shash_alg = {
-	.digestsize	=	SHA1_DIGEST_SIZE,
+static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
+{
+	struct mcryptd_ahash *mcryptd_tfm;
+	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct mcryptd_hash_ctx *mctx;
+
+	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
+						CRYPTO_ALG_INTERNAL,
+						CRYPTO_ALG_INTERNAL);
+	if (IS_ERR(mcryptd_tfm))
+		return PTR_ERR(mcryptd_tfm);
+	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
+	mctx->alg_state = &sha1_mb_alg_state;
+	ctx->mcryptd_tfm = mcryptd_tfm;
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				sizeof(struct ahash_request) +
+				crypto_ahash_reqsize(&mcryptd_tfm->base));
+
+	return 0;
+}
+
+static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
+{
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				sizeof(struct ahash_request) +
+				sizeof(struct sha1_hash_ctx));
+
+	return 0;
+}
+
+static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static struct ahash_alg sha1_mb_areq_alg = {
 	.init		=	sha1_mb_init,
 	.update		=	sha1_mb_update,
 	.final		=	sha1_mb_final,
 	.finup		=	sha1_mb_finup,
 	.export		=	sha1_mb_export,
 	.import		=	sha1_mb_import,
-	.descsize	=	sizeof(struct sha1_hash_ctx),
-	.statesize	=	sizeof(struct sha1_hash_ctx),
-	.base		=	{
-		.cra_name	 = "__sha1-mb",
-		.cra_driver_name = "__intel_sha1-mb",
-		.cra_priority	 = 100,
-		/*
-		 * use ASYNC flag as some buffers in multi-buffer
-		 * algo may not have completed before hashing thread sleep
-		 */
-		.cra_flags	 = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC |
-				   CRYPTO_ALG_INTERNAL,
-		.cra_blocksize	 = SHA1_BLOCK_SIZE,
-		.cra_module	 = THIS_MODULE,
-		.cra_list	 = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
+	.halg = {
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.statesize	= sizeof(struct sha1_hash_ctx),
+		.base		= {
+			.cra_name	 = "__sha1-mb",
+			.cra_driver_name = "__intel_sha1-mb",
+			.cra_priority	 = 100,
+			/*
+			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before hashing thread
+			 * sleep
+			 */
+			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_INTERNAL,
+			.cra_blocksize	= SHA1_BLOCK_SIZE,
+			.cra_module	= THIS_MODULE,
+			.cra_list	= LIST_HEAD_INIT
+				(sha1_mb_areq_alg.halg.base.cra_list),
+			.cra_init	= sha1_mb_areq_init_tfm,
+			.cra_exit	= sha1_mb_areq_exit_tfm,
+			.cra_ctxsize	= sizeof(struct sha1_hash_ctx),
+		}
 	}
 };
 
@@ -817,46 +863,20 @@ static int sha1_mb_async_import(struct ahash_request *req, const void *in)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-	struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm);
+	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
 	struct mcryptd_hash_request_ctx *rctx;
-	struct shash_desc *desc;
+	struct ahash_request *areq;
 
 	memcpy(mcryptd_req, req, sizeof(*req));
 	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
 	rctx = ahash_request_ctx(mcryptd_req);
-	desc = &rctx->desc;
-	desc->tfm = child;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	return crypto_ahash_import(mcryptd_req, in);
-}
-
-static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
-	struct mcryptd_ahash *mcryptd_tfm;
-	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct mcryptd_hash_ctx *mctx;
+	areq = &rctx->areq;
 
-	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
-					CRYPTO_ALG_INTERNAL,
-					CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(mcryptd_tfm))
-		return PTR_ERR(mcryptd_tfm);
-	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
-	mctx->alg_state = &sha1_mb_alg_state;
-	ctx->mcryptd_tfm = mcryptd_tfm;
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				sizeof(struct ahash_request) +
-				crypto_ahash_reqsize(&mcryptd_tfm->base));
-
-	return 0;
-}
-
-static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+	ahash_request_set_tfm(areq, child);
+	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
+				rctx->complete, req);
 
-	mcryptd_free_ahash(ctx->mcryptd_tfm);
+	return crypto_ahash_import(mcryptd_req, in);
 }
 
 static struct ahash_alg sha1_mb_async_alg = {
@@ -965,7 +985,7 @@ static int __init sha1_mb_mod_init(void)
 	}
 	sha1_mb_alg_state.flusher = &sha1_mb_flusher;
 
-	err = crypto_register_shash(&sha1_mb_shash_alg);
+	err = crypto_register_ahash(&sha1_mb_areq_alg);
 	if (err)
 		goto err2;
 	err = crypto_register_ahash(&sha1_mb_async_alg);
@@ -975,7 +995,7 @@ static int __init sha1_mb_mod_init(void)
 
 	return 0;
 err1:
-	crypto_unregister_shash(&sha1_mb_shash_alg);
+	crypto_unregister_ahash(&sha1_mb_areq_alg);
err2:
 	for_each_possible_cpu(cpu) {
 		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
@@ -991,7 +1011,7 @@ static void __exit sha1_mb_mod_fini(void)
 	struct mcryptd_alg_cstate *cpu_state;
 
 	crypto_unregister_ahash(&sha1_mb_async_alg);
-	crypto_unregister_shash(&sha1_mb_shash_alg);
+	crypto_unregister_ahash(&sha1_mb_areq_alg);
 	for_each_possible_cpu(cpu) {
 		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
 		kfree(cpu_state->mgr);