@@ -70,8 +70,8 @@ static void ssi_hash_create_xcbc_setup(
 				       unsigned int *seq_size);
 
 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
-				  struct cc_hw_desc desc[],
-				  unsigned int *seq_size);
+				       struct cc_hw_desc desc[],
+				       unsigned int *seq_size);
 
 struct ssi_hash_alg {
 	struct list_head entry;
@@ -117,8 +117,8 @@ static void ssi_hash_create_data_desc(
 static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
 {
 	if (unlikely((mode == DRV_HASH_MD5) ||
-		(mode == DRV_HASH_SHA384) ||
-		(mode == DRV_HASH_SHA512))) {
+		     (mode == DRV_HASH_SHA384) ||
+		     (mode == DRV_HASH_SHA512))) {
 		set_bytes_swap(desc, 1);
 	} else {
 		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
@@ -135,7 +135,7 @@ static int ssi_hash_map_result(struct device *dev,
 					 DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
 		SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n",
-			digestsize);
+			    digestsize);
 		return -ENOMEM;
 	}
 	SSI_LOG_DEBUG("Mapped digest result buffer %u B "
@@ -200,12 +200,12 @@ static int ssi_hash_map_request(struct device *dev,
 	state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
 		SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n",
-		ctx->inter_digestsize, state->digest_buff);
+			    ctx->inter_digestsize, state->digest_buff);
 		goto fail3;
 	}
 	SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=%pad\n",
-		ctx->inter_digestsize, state->digest_buff,
-		state->digest_buff_dma_addr);
+		      ctx->inter_digestsize, state->digest_buff,
+		      state->digest_buff_dma_addr);
 
 	if (is_hmac) {
 		dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
@@ -249,12 +249,12 @@ static int ssi_hash_map_request(struct device *dev,
 		state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
 			SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n",
-			HASH_LEN_SIZE, state->digest_bytes_len);
+				    HASH_LEN_SIZE, state->digest_bytes_len);
 			goto fail4;
 		}
 		SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=%pad\n",
-			HASH_LEN_SIZE, state->digest_bytes_len,
-			state->digest_bytes_len_dma_addr);
+			      HASH_LEN_SIZE, state->digest_bytes_len,
+			      state->digest_bytes_len_dma_addr);
 	} else {
 		state->digest_bytes_len_dma_addr = 0;
 	}
@@ -263,12 +263,13 @@ static int ssi_hash_map_request(struct device *dev,
 		state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
 			SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n",
-			ctx->inter_digestsize, state->opad_digest_buff);
+				    ctx->inter_digestsize,
+				    state->opad_digest_buff);
 			goto fail5;
 		}
 		SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=%pad\n",
-			ctx->inter_digestsize, state->opad_digest_buff,
-			state->opad_digest_dma_addr);
+			      ctx->inter_digestsize, state->opad_digest_buff,
+			      state->opad_digest_dma_addr);
 	} else {
 		state->opad_digest_dma_addr = 0;
 	}
@@ -602,7 +603,7 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
 	if (unlikely(rc)) {
 		if (rc == 1) {
 			SSI_LOG_DEBUG(" data size not require HW update %x\n",
-				nbytes);
+				      nbytes);
 			/* No hardware updates are required */
 			return 0;
 		}
@@ -1145,17 +1146,17 @@ out:
 
 	if (ctx->key_params.key_dma_addr) {
 		dma_unmap_single(&ctx->drvdata->plat_dev->dev,
-			ctx->key_params.key_dma_addr,
-			ctx->key_params.keylen, DMA_TO_DEVICE);
+				 ctx->key_params.key_dma_addr,
+				 ctx->key_params.keylen, DMA_TO_DEVICE);
 		SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
-			ctx->key_params.key_dma_addr,
-			ctx->key_params.keylen);
+			      ctx->key_params.key_dma_addr,
+			      ctx->key_params.keylen);
 	}
 	return rc;
 }
 
 static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
-			const u8 *key, unsigned int keylen)
+			   const u8 *key, unsigned int keylen)
 {
 	struct ssi_crypto_req ssi_req = {};
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -1232,18 +1233,18 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 
 	dma_unmap_single(&ctx->drvdata->plat_dev->dev,
-		ctx->key_params.key_dma_addr,
-		ctx->key_params.keylen, DMA_TO_DEVICE);
+			 ctx->key_params.key_dma_addr,
+			 ctx->key_params.keylen, DMA_TO_DEVICE);
 	SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
-		ctx->key_params.key_dma_addr,
-		ctx->key_params.keylen);
+		      ctx->key_params.key_dma_addr,
+		      ctx->key_params.keylen);
 
 	return rc;
 }
 
 #if SSI_CC_HAS_CMAC
 static int ssi_cmac_setkey(struct crypto_ahash *ahash,
-			const u8 *key, unsigned int keylen)
+			   const u8 *key, unsigned int keylen)
 {
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
@@ -1316,22 +1317,22 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
 	ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
 		SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n",
-			sizeof(ctx->digest_buff), ctx->digest_buff);
+			    sizeof(ctx->digest_buff), ctx->digest_buff);
 		goto fail;
 	}
 	SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=%pad\n",
-		sizeof(ctx->digest_buff), ctx->digest_buff,
+		      sizeof(ctx->digest_buff), ctx->digest_buff,
 		      ctx->digest_buff_dma_addr);
 
 	ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
 		SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n",
-			sizeof(ctx->opad_tmp_keys_buff),
-			ctx->opad_tmp_keys_buff);
+			    sizeof(ctx->opad_tmp_keys_buff),
+			    ctx->opad_tmp_keys_buff);
 		goto fail;
 	}
 	SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
-		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+		      sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
 		      ctx->opad_tmp_keys_dma_addr);
 
 	ctx->is_hmac = false;
@@ -1353,7 +1354,7 @@ static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
 		container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-			sizeof(struct ahash_req_ctx));
+				 sizeof(struct ahash_req_ctx));
 
 	ctx->hash_mode = ssi_alg->hash_mode;
 	ctx->hw_mode = ssi_alg->hw_mode;
@@ -1394,7 +1395,7 @@ static int ssi_mac_update(struct ahash_request *req)
 	if (unlikely(rc)) {
 		if (rc == 1) {
 			SSI_LOG_DEBUG(" data size not require HW update %x\n",
-				req->nbytes);
+				      req->nbytes);
 			/* No hardware updates are required */
 			return 0;
 		}
@@ -1837,7 +1838,7 @@ out:
 }
 
 static int ssi_ahash_setkey(struct crypto_ahash *ahash,
-			const u8 *key, unsigned int keylen)
+			    const u8 *key, unsigned int keylen)
 {
 	return ssi_hash_setkey((void *)ahash, key, keylen, false);
 }
@@ -2119,7 +2120,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 
 	/* Copy-to-sram digest-len */
 	ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
-			ARRAY_SIZE(digest_len_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(digest_len_init),
+				     larval_seq, &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2130,7 +2132,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 #if (DX_DEV_SHA_MAX > 256)
 	/* Copy-to-sram digest-len for sha384/512 */
 	ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
-			ARRAY_SIZE(digest_len_sha512_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(digest_len_sha512_init),
+				     larval_seq, &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2144,7 +2147,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 
 	/* Copy-to-sram initial SHA* digests */
 	ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
-			ARRAY_SIZE(md5_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(md5_init), larval_seq,
+				     &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2152,7 +2156,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 	larval_seq_len = 0;
 
 	ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
-			ARRAY_SIZE(sha1_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(sha1_init), larval_seq,
+				     &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2160,7 +2165,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 	larval_seq_len = 0;
 
 	ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
-			ARRAY_SIZE(sha224_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(sha224_init), larval_seq,
+				     &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2168,7 +2174,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 	larval_seq_len = 0;
 
 	ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
-			ARRAY_SIZE(sha256_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(sha256_init), larval_seq,
+				     &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2182,10 +2189,10 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 		const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
 
 		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
-				larval_seq, &larval_seq_len);
+					     larval_seq, &larval_seq_len);
 		sram_buff_ofs += sizeof(u32);
 		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
-				larval_seq, &larval_seq_len);
+					     larval_seq, &larval_seq_len);
 		sram_buff_ofs += sizeof(u32);
 	}
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
@@ -2200,10 +2207,10 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 		const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
 
 		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
-				larval_seq, &larval_seq_len);
+					     larval_seq, &larval_seq_len);
 		sram_buff_ofs += sizeof(u32);
 		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
-				larval_seq, &larval_seq_len);
+					     larval_seq, &larval_seq_len);
 		sram_buff_ofs += sizeof(u32);
 	}
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
@@ -2228,7 +2235,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
 	hash_handle = kzalloc(sizeof(struct ssi_hash_handle), GFP_KERNEL);
 	if (!hash_handle) {
 		SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
-			sizeof(struct ssi_hash_handle));
+			    sizeof(struct ssi_hash_handle));
 		rc = -ENOMEM;
 		goto fail;
 	}
@@ -2300,7 +2307,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
 		if (IS_ERR(t_alg)) {
 			rc = PTR_ERR(t_alg);
 			SSI_LOG_ERR("%s alg allocation failed\n",
-				driver_hash[alg].driver_name);
+				    driver_hash[alg].driver_name);
 			goto fail;
 		}
 		t_alg->drvdata = drvdata;
@@ -2346,8 +2353,8 @@ int ssi_hash_free(struct ssi_drvdata *drvdata)
 }
 
 static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
-				  struct cc_hw_desc desc[],
-				  unsigned int *seq_size)
+				       struct cc_hw_desc desc[],
+				       unsigned int *seq_size)
 {
 	unsigned int idx = *seq_size;
 	struct ahash_req_ctx *state = ahash_request_ctx(areq);
|
|
}
|
|
}
|
|
|
|
|
|
static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
|
|
static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
|
|
- struct cc_hw_desc desc[],
|
|
|
|
- unsigned int *seq_size)
|
|
|
|
|
|
+ struct cc_hw_desc desc[],
|
|
|
|
+ unsigned int *seq_size)
|
|
{
|
|
{
|
|
unsigned int idx = *seq_size;
|
|
unsigned int idx = *seq_size;
|
|
struct ahash_req_ctx *state = ahash_request_ctx(areq);
|
|
struct ahash_req_ctx *state = ahash_request_ctx(areq);
|