@@ -211,7 +211,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
 	}
 	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
 	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
-				   256 * PPC4XX_NUM_PD,
+				   sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				   &dev->shadow_sa_pool_pa,
				   GFP_ATOMIC);
 	if (!dev->shadow_sa_pool)
@@ -223,16 +223,14 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
 	if (!dev->shadow_sr_pool)
 		return -ENOMEM;
 	for (i = 0; i < PPC4XX_NUM_PD; i++) {
-		pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
-						sizeof(struct pd_uinfo) * i);
+		pd_uinfo = &dev->pdr_uinfo[i];

 		/* alloc 256 bytes which is enough for any kind of dynamic sa */
-		pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
+		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
 		pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;

 		/* alloc state record */
-		pd_uinfo->sr_va = dev->shadow_sr_pool +
-		    sizeof(struct sa_state_record) * i;
+		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
 		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
 		    sizeof(struct sa_state_record) * i;
 	}
@@ -248,8 +246,9 @@ static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
 			  dev->pdr, dev->pdr_pa);

 	if (dev->shadow_sa_pool)
-		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
-				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+		dma_free_coherent(dev->core_dev->device,
+				  sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);

 	if (dev->shadow_sr_pool)
 		dma_free_coherent(dev->core_dev->device,
@@ -277,11 +276,9 @@ static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)

 static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
 {
-	struct pd_uinfo *pd_uinfo;
+	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
 	unsigned long flags;

-	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
-				       sizeof(struct pd_uinfo) * idx);
 	spin_lock_irqsave(&dev->core_dev->lock, flags);
 	if (dev->pdr_tail != PPC4XX_LAST_PD)
 		dev->pdr_tail++;
@@ -298,7 +295,7 @@ static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
 {
 	*pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;

-	return dev->pdr + sizeof(struct ce_pd) * idx;
+	return &dev->pdr[idx];
 }

 /**
@@ -376,7 +373,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
 {
 	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

-	return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
+	return &dev->gdr[idx];
 }

 /**
@@ -387,7 +384,6 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
 static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
 {
 	int i;
-	struct ce_sd *sd_array;

 	/* alloc memory for scatter descriptor ring */
 	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
@@ -407,10 +403,8 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
 		return -ENOMEM;
 	}

-	sd_array = dev->sdr;
-
 	for (i = 0; i < PPC4XX_NUM_SD; i++) {
-		sd_array[i].ptr = dev->scatter_buffer_pa +
+		dev->sdr[i].ptr = dev->scatter_buffer_pa +
 				  PPC4XX_SD_BUFFER_SIZE * i;
 	}

@@ -480,7 +474,7 @@ static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
 {
 	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

-	return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
+	return &dev->sdr[idx];
 }

 static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
@@ -529,11 +523,10 @@ static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
 					struct crypto4xx_ctx *ctx)
 {
 	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
-	struct sa_state_record *state_record =
-	    (struct sa_state_record *) pd_uinfo->sr_va;

 	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
-		memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
+		memcpy((void *) pd_uinfo->dest_va,
+		       pd_uinfo->sr_va->save_digest,
 		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
 	}

@@ -607,11 +600,9 @@ static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,

 static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
 {
-	struct ce_pd *pd;
-	struct pd_uinfo *pd_uinfo;
+	struct ce_pd *pd = &dev->pdr[idx];
+	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];

-	pd = dev->pdr + sizeof(struct ce_pd)*idx;
-	pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
 	if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
 	    CRYPTO_ALG_TYPE_ABLKCIPHER)
 		return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
@@ -712,7 +703,6 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 	unsigned long flags;
 	struct pd_uinfo *pd_uinfo = NULL;
 	unsigned int nbytes = datalen, idx;
-	unsigned int ivlen = 0;
 	u32 gd_idx = 0;

 	/* figure how many gd is needed */
@@ -771,17 +761,15 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 	}
 	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

-	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
-				       sizeof(struct pd_uinfo) * pd_entry);
+	pd_uinfo = &dev->pdr_uinfo[pd_entry];
 	pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
 	pd_uinfo->async_req = req;
 	pd_uinfo->num_gd = num_gd;
 	pd_uinfo->num_sd = num_sd;

 	if (iv_len || ctx->is_hash) {
-		ivlen = iv_len;
 		pd->sa = pd_uinfo->sa_pa;
-		sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
+		sa = pd_uinfo->sa_va;
 		if (ctx->direction == DIR_INBOUND)
 			memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
 		else
@@ -791,14 +779,15 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 				 &pd_uinfo->sr_pa, 4);

 		if (iv_len)
-			crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
+			crypto4xx_memcpy_le(pd_uinfo->sr_va->save_iv,
+					    iv, iv_len);
 	} else {
 		if (ctx->direction == DIR_INBOUND) {
 			pd->sa = ctx->sa_in_dma_addr;
-			sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+			sa = ctx->sa_in;
 		} else {
 			pd->sa = ctx->sa_out_dma_addr;
-			sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+			sa = ctx->sa_out;
 		}
 	}
 	pd->sa_len = ctx->sa_len;
@@ -1006,9 +995,8 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)

 	while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
 		tail = core_dev->dev->pdr_tail;
-		pd_uinfo = core_dev->dev->pdr_uinfo +
-			sizeof(struct pd_uinfo)*tail;
-		pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
+		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
+		pd = &core_dev->dev->pdr[tail];
 		if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
 		    pd->pd_ctl.bf.pe_done &&
 		    !pd->pd_ctl.bf.host_ready) {