@@ -84,7 +84,7 @@ struct dcp_async_ctx {
 	unsigned int			hot:1;
 
 	/* Crypto-specific context */
-	struct crypto_skcipher		*fallback;
+	struct crypto_sync_skcipher	*fallback;
 	unsigned int			key_len;
 	uint8_t				key[AES_KEYSIZE_128];
 };
@@ -376,10 +376,10 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 	int ret;
 
-	skcipher_request_set_tfm(subreq, ctx->fallback);
+	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
 	skcipher_request_set_crypt(subreq, req->src, req->dst,
 				   req->nbytes, req->info);
@@ -460,16 +460,16 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	 * but is supported by in-kernel software implementation, we use
 	 * software fallback.
 	 */
-	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(actx->fallback,
+	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(actx->fallback,
 				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 
-	ret = crypto_skcipher_setkey(actx->fallback, key, len);
+	ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
 	if (!ret)
 		return 0;
 
 	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
-	tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
+	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
 			       CRYPTO_TFM_RES_MASK;
 
 	return ret;
@@ -478,11 +478,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
 {
 	const char *name = crypto_tfm_alg_name(tfm);
-	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
-	struct crypto_skcipher *blk;
+	struct crypto_sync_skcipher *blk;
 
-	blk = crypto_alloc_skcipher(name, 0, flags);
+	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(blk))
 		return PTR_ERR(blk);
 
@@ -495,7 +494,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
 {
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
 
-	crypto_free_skcipher(actx->fallback);
+	crypto_free_sync_skcipher(actx->fallback);
 }
 
 /*