@@ -83,14 +83,14 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
while (nbytes != 0) {
if (sg_is_chain(sg_list)) {
SSI_LOG_ERR("Unexpected chained entry "
- "in sg (entry =0x%X) \n", nents);
+ "in sg (entry =0x%X)\n", nents);
BUG();
}
if (sg_list->length != 0) {
nents++;
/* get the number of bytes in the last entry */
*lbytes = nbytes;
- nbytes -= ( sg_list->length > nbytes ) ? nbytes : sg_list->length;
+ nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
sg_list = sg_next(sg_list);
} else {
sg_list = (struct scatterlist *)sg_page(sg_list);
@@ -99,7 +99,7 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
}
}
}
- SSI_LOG_DEBUG("nents %d last bytes %d\n",nents, *lbytes);
+ SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
return nents;
}

@@ -154,16 +154,16 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
u32 new_nents;;

/* Verify there is no memory overflow*/
- new_nents = (*curr_nents + buff_size/CC_MAX_MLLI_ENTRY_SIZE + 1);
- if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES ) {
+ new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
+ if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
return -ENOMEM;
}

/*handle buffer longer than 64 kbytes */
- while (buff_size > CC_MAX_MLLI_ENTRY_SIZE ) {
+ while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
- SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
+ SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
mlli_entry_p[LLI_WORD0_OFFSET],
mlli_entry_p[LLI_WORD1_OFFSET]);
buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
@@ -174,7 +174,7 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
/*Last entry */
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, buff_size);
- SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
+ SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
mlli_entry_p[LLI_WORD0_OFFSET],
mlli_entry_p[LLI_WORD1_OFFSET]);
mlli_entry_p = mlli_entry_p + 2;
@@ -196,15 +196,15 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
curr_sgl = sg_next(curr_sgl)) {
u32 entry_data_len =
(sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
- sg_dma_len(curr_sgl) - sglOffset : sgl_data_len ;
+ sg_dma_len(curr_sgl) - sglOffset : sgl_data_len;
sgl_data_len -= entry_data_len;
rc = ssi_buffer_mgr_render_buff_to_mlli(
sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
&mlli_entry_p);
- if(rc != 0) {
+ if (rc != 0) {
return rc;
}
- sglOffset=0;
+ sglOffset = 0;
}
*mlli_entry_pp = mlli_entry_p;
return 0;
@@ -216,7 +216,7 @@ static int ssi_buffer_mgr_generate_mlli(
struct mlli_params *mlli_params)
{
u32 *mlli_p;
- u32 total_nents = 0,prev_total_nents = 0;
+ u32 total_nents = 0, prev_total_nents = 0;
int rc = 0, i;

SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
@@ -227,7 +227,7 @@ static int ssi_buffer_mgr_generate_mlli(
&(mlli_params->mlli_dma_addr));
if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
SSI_LOG_ERR("dma_pool_alloc() failed\n");
- rc =-ENOMEM;
+ rc = -ENOMEM;
goto build_mlli_exit;
}
/* Point to start of MLLI */
@@ -244,7 +244,7 @@ static int ssi_buffer_mgr_generate_mlli(
sg_data->entry[i].buffer_dma,
sg_data->total_data_len[i], &total_nents,
&mlli_p);
- if(rc != 0) {
+ if (rc != 0) {
return rc;
}

@@ -323,13 +323,13 @@ static int
ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
enum dma_data_direction direction)
{
- u32 i , j;
+ u32 i, j;
struct scatterlist *l_sg = sg;
for (i = 0; i < nents; i++) {
if (l_sg == NULL) {
break;
}
- if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)){
+ if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
goto err;
}
@@ -343,7 +343,7 @@ err:
if (sg == NULL) {
break;
}
- dma_unmap_sg(dev,sg,1,direction);
+ dma_unmap_sg(dev, sg, 1, direction);
sg = sg_next(sg);
}
return 0;
@@ -387,7 +387,7 @@ static int ssi_buffer_mgr_map_scatterlist(
* be changed from the original sgl nents
*/
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (unlikely(*mapped_nents == 0)){
+ if (unlikely(*mapped_nents == 0)) {
*nents = 0;
SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -400,7 +400,7 @@ static int ssi_buffer_mgr_map_scatterlist(
sg,
*nents,
direction);
- if (unlikely(*mapped_nents != *nents)){
+ if (unlikely(*mapped_nents != *nents)) {
*nents = *mapped_nents;
SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -418,7 +418,7 @@ ssi_aead_handle_config_buf(struct device *dev,
struct buffer_array *sg_data,
unsigned int assoclen)
{
- SSI_LOG_DEBUG(" handle additional data config set to DLLI \n");
+ SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
/* create sg for the current buffer */
sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
@@ -453,9 +453,9 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
u32 curr_buff_cnt,
struct buffer_array *sg_data)
{
- SSI_LOG_DEBUG(" handle curr buff %x set to DLLI \n", curr_buff_cnt);
+ SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
- sg_init_one(areq_ctx->buff_sg,curr_buff, curr_buff_cnt);
+ sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
DMA_TO_DEVICE) != 1)) {
SSI_LOG_ERR("dma_map_sg() "
@@ -540,12 +540,12 @@ int ssi_buffer_mgr_map_blkcipher_request(
sg_data.num_of_buffers = 0;

/* Map IV buffer */
- if (likely(ivsize != 0) ) {
+ if (likely(ivsize != 0)) {
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
ivsize,
- req_ctx->is_giv ? DMA_BIDIRECTIONAL:
+ req_ctx->is_giv ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev,
req_ctx->gen_ctx.iv_dma_addr))) {
@@ -581,7 +581,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
} else {
/* Map the dst sg */
if (unlikely(ssi_buffer_mgr_map_scatterlist(
- dev,dst, nbytes,
+ dev, dst, nbytes,
DMA_BIDIRECTIONAL, &req_ctx->out_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
&mapped_nents))){
@@ -606,7 +606,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
- if (unlikely(rc!= 0))
+ if (unlikely(rc != 0))
goto ablkcipher_exit;

}
@@ -686,19 +686,19 @@ void ssi_buffer_mgr_unmap_aead_request(
areq_ctx->mlli_params.mlli_dma_addr);
}

- SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src),areq_ctx->src.nents,areq_ctx->assoc.nents,req->assoclen,req->cryptlen);
- size_to_unmap = req->assoclen+req->cryptlen;
- if(areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT){
+ SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
+ size_to_unmap = req->assoclen + req->cryptlen;
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) {
size_to_unmap += areq_ctx->req_authsize;
}
if (areq_ctx->is_gcm4543)
size_to_unmap += crypto_aead_ivsize(tfm);

- dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src,size_to_unmap,&dummy,&chained) , DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
if (unlikely(req->src != req->dst)) {
SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst,size_to_unmap,&dummy,&chained),
+ dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -714,8 +714,8 @@ void ssi_buffer_mgr_unmap_aead_request(
*/
ssi_buffer_mgr_copy_scatterlist_portion(
areq_ctx->backup_mac, req->src,
- size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
- size_to_skip+ req->cryptlen, SSI_SG_FROM_BUF);
+ size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
}
}

@@ -736,7 +736,7 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
return 0;
}

- for( i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
+ for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
if (sgl == NULL) {
break;
}
@@ -798,7 +798,7 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
hw_iv_size, req->iv,
(unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
- if (do_chain == true && areq_ctx->plaintext_authenticate_only == true){ // TODO: what about CTR?? ask Ron
+ if (do_chain == true && areq_ctx->plaintext_authenticate_only == true) { // TODO: what about CTR?? ask Ron
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
@@ -858,7 +858,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
current_sg = sg_next(current_sg);
//if have reached the end of the sgl, then this is unexpected
if (current_sg == NULL) {
- SSI_LOG_ERR("reached end of sg list. unexpected \n");
+ SSI_LOG_ERR("reached end of sg list. unexpected\n");
BUG();
}
sg_index += current_sg->length;
@@ -923,7 +923,7 @@ static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
if (likely(req->src == req->dst)) {
/*INPLACE*/
areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->srcSgl)+
+ areq_ctx->srcSgl) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
areq_ctx->srcSgl) +
@@ -942,7 +942,7 @@ static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
areq_ctx->dstSgl) +
(*dst_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->dstSgl)+
+ areq_ctx->dstSgl) +
(*dst_last_bytes - authsize);
}
}
@@ -964,7 +964,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
/*INPLACE*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen,areq_ctx->srcOffset, is_last_table,
+ areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
&areq_ctx->src.mlli_nents);

icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
@@ -1018,11 +1018,11 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
/*NON-INPLACE and DECRYPT*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
+ areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
&areq_ctx->src.mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
areq_ctx->dst.nents, areq_ctx->dstSgl,
- areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
+ areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
&areq_ctx->dst.mlli_nents);

icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
@@ -1044,8 +1044,8 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
}
ssi_buffer_mgr_copy_scatterlist_portion(
areq_ctx->backup_mac, req->src,
- size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
- size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
+ size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
} else { /* Contig. ICV */
/*Should hanlde if the sg is not contig.*/
@@ -1061,11 +1061,11 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
/*NON-INPLACE and ENCRYPT*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
areq_ctx->dst.nents, areq_ctx->dstSgl,
- areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
+ areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
&areq_ctx->dst.mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
+ areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
&areq_ctx->src.mlli_nents);

icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
@@ -1108,7 +1108,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
int rc = 0;
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
- unsigned int size_for_map = req->assoclen +req->cryptlen; /*non-inplace mode*/
+ unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
bool chained = false;
@@ -1130,8 +1130,8 @@ static inline int ssi_buffer_mgr_aead_chain_data(
size_for_map += crypto_aead_ivsize(tfm);
}

- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize:0;
- src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src,size_for_map,&src_last_bytes, &chained);
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
+ src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
sg_index = areq_ctx->srcSgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
@@ -1139,7 +1139,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
//if have reached the end of the sgl, then this is unexpected
if (areq_ctx->srcSgl == NULL) {
- SSI_LOG_ERR("reached end of sg list. unexpected \n");
+ SSI_LOG_ERR("reached end of sg list. unexpected\n");
BUG();
}
sg_index += areq_ctx->srcSgl->length;
@@ -1157,7 +1157,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->srcOffset = offset;

if (req->src != req->dst) {
- size_for_map = req->assoclen +req->cryptlen;
+ size_for_map = req->assoclen + req->cryptlen;
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
if (is_gcm4543) {
size_for_map += crypto_aead_ivsize(tfm);
@@ -1173,7 +1173,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
}
}

- dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst,size_for_map,&dst_last_bytes, &chained);
+ dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
sg_index = areq_ctx->dstSgl->length;
offset = size_to_skip;

@@ -1184,7 +1184,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
//if have reached the end of the sgl, then this is unexpected
if (areq_ctx->dstSgl == NULL) {
- SSI_LOG_ERR("reached end of sg list. unexpected \n");
+ SSI_LOG_ERR("reached end of sg list. unexpected\n");
BUG();
}
sg_index += areq_ctx->dstSgl->length;
@@ -1214,7 +1214,7 @@ chain_data_exit:
return rc;
}

-static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
+static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
@@ -1298,8 +1298,8 @@ int ssi_buffer_mgr_map_aead_request(
*/
ssi_buffer_mgr_copy_scatterlist_portion(
areq_ctx->backup_mac, req->src,
- size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
- size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
+ size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
}

/* cacluate the size for cipher remove ICV in decrypt*/
@@ -1393,7 +1393,7 @@ int ssi_buffer_mgr_map_aead_request(
size_to_map += crypto_aead_ivsize(tfm);
rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
size_to_map, DMA_BIDIRECTIONAL, &(areq_ctx->src.nents),
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES+LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto aead_map_failure;
@@ -1459,9 +1459,9 @@ int ssi_buffer_mgr_map_aead_request(
}

ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
- SSI_LOG_DEBUG("assoc params mn %d\n",areq_ctx->assoc.mlli_nents);
- SSI_LOG_DEBUG("src params mn %d\n",areq_ctx->src.mlli_nents);
- SSI_LOG_DEBUG("dst params mn %d\n",areq_ctx->dst.mlli_nents);
+ SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
+ SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
+ SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
}
return 0;

@@ -1503,7 +1503,7 @@ int ssi_buffer_mgr_map_hash_request_final(

/*TODO: copy data in case that buffer is enough for operation */
/* map the previous buffer */
- if (*curr_buff_cnt != 0 ) {
+ if (*curr_buff_cnt != 0) {
if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
*curr_buff_cnt, &sg_data) != 0) {
return -ENOMEM;
@@ -1511,7 +1511,7 @@ int ssi_buffer_mgr_map_hash_request_final(
}

if (src && (nbytes > 0) && do_update) {
- if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
+ if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
nbytes,
DMA_TO_DEVICE,
&areq_ctx->in_nents,
@@ -1519,9 +1519,9 @@ int ssi_buffer_mgr_map_hash_request_final(
&dummy, &mapped_nents))){
goto unmap_curr_buff;
}
- if ( src && (mapped_nents == 1)
- && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
- memcpy(areq_ctx->buff_sg,src,
+ if (src && (mapped_nents == 1)
+ && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+ memcpy(areq_ctx->buff_sg, src,
sizeof(struct scatterlist));
areq_ctx->buff_sg->length = nbytes;
areq_ctx->curr_sg = areq_ctx->buff_sg;
@@ -1547,7 +1547,7 @@ int ssi_buffer_mgr_map_hash_request_final(
}
}
/* change the buffer index for the unmap function */
- areq_ctx->buff_index = (areq_ctx->buff_index^1);
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
return 0;
@@ -1556,7 +1556,7 @@ fail_unmap_din:
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
- if (*curr_buff_cnt != 0 ) {
+ if (*curr_buff_cnt != 0) {
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
}
return -ENOMEM;
@@ -1586,7 +1586,7 @@ int ssi_buffer_mgr_map_hash_request_update(

SSI_LOG_DEBUG(" update params : curr_buff=%pK "
"curr_buff_cnt=0x%X nbytes=0x%X "
- "src=%pK curr_index=%u \n",
+ "src=%pK curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes,
src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
@@ -1623,12 +1623,12 @@ int ssi_buffer_mgr_map_hash_request_update(
/* Copy the new residue to next buffer */
if (*next_buff_cnt != 0) {
SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
- " residue %u \n", next_buff,
+ " residue %u\n", next_buff,
(update_data_len - *curr_buff_cnt),
*next_buff_cnt);
ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
- (update_data_len -*curr_buff_cnt),
- nbytes,SSI_SG_TO_BUF);
+ (update_data_len - *curr_buff_cnt),
+ nbytes, SSI_SG_TO_BUF);
/* change the buffer index for next operation */
swap_index = 1;
}
@@ -1642,19 +1642,19 @@ int ssi_buffer_mgr_map_hash_request_update(
swap_index = 1;
}

- if ( update_data_len > *curr_buff_cnt ) {
- if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
- (update_data_len -*curr_buff_cnt),
+ if (update_data_len > *curr_buff_cnt) {
+ if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
+ (update_data_len - *curr_buff_cnt),
DMA_TO_DEVICE,
&areq_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy, &mapped_nents))){
goto unmap_curr_buff;
}
- if ( (mapped_nents == 1)
- && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
+ if ((mapped_nents == 1)
+ && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
/* only one entry in the SG and no previous data */
- memcpy(areq_ctx->buff_sg,src,
+ memcpy(areq_ctx->buff_sg, src,
sizeof(struct scatterlist));
areq_ctx->buff_sg->length = update_data_len;
areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
@@ -1678,7 +1678,7 @@ int ssi_buffer_mgr_map_hash_request_update(
}

}
- areq_ctx->buff_index = (areq_ctx->buff_index^swap_index);
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

return 0;

@@ -1686,7 +1686,7 @@ fail_unmap_din:
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
- if (*curr_buff_cnt != 0 ) {
+ if (*curr_buff_cnt != 0) {
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
}
return -ENOMEM;
@@ -1722,7 +1722,7 @@ void ssi_buffer_mgr_unmap_hash_request(

if (*prev_len != 0) {
SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
- "dma=0x%llX len 0x%X\n",
+ " dma=0x%llX len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
(unsigned long long)sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));