@@ -39,6 +39,7 @@
 #include <crypto/ctr.h>
 #include <crypto/sha.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
 #include "crypto4xx_reg_def.h"
 #include "crypto4xx_core.h"
 #include "crypto4xx_sa.h"
@@ -573,8 +574,10 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
 				    dst->offset, dst->length, DMA_FROM_DEVICE);
 	}
 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
-	if (ablk_req->base.complete != NULL)
-		ablk_req->base.complete(&ablk_req->base, 0);
+
+	if (pd_uinfo->state & PD_ENTRY_BUSY)
+		ablkcipher_request_complete(ablk_req, -EINPROGRESS);
+	ablkcipher_request_complete(ablk_req, 0);
 
 	return 0;
 }
@@ -591,9 +594,10 @@ static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
 	crypto4xx_copy_digest_to_dst(pd_uinfo,
 				     crypto_tfm_ctx(ahash_req->base.tfm));
 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
-	/* call user provided callback function x */
-	if (ahash_req->base.complete != NULL)
-		ahash_req->base.complete(&ahash_req->base, 0);
+
+	if (pd_uinfo->state & PD_ENTRY_BUSY)
+		ahash_request_complete(ahash_req, -EINPROGRESS);
+	ahash_request_complete(ahash_req, 0);
 
 	return 0;
 }
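
Both completion paths now follow the crypto API's backlog convention: a request
that was accepted while the engine was flagged busy is first signalled with
-EINPROGRESS (its turn in the backlog has come) and only afterwards completed
with the final status. Below is a minimal caller-side sketch of that convention;
the my_result/my_complete names are made up for illustration and are not part of
this driver:

	#include <linux/completion.h>
	#include <linux/crypto.h>

	struct my_result {
		struct completion completion;
		int err;
	};

	/* Callback for a request submitted with CRYPTO_TFM_REQ_MAY_BACKLOG. */
	static void my_complete(struct crypto_async_request *req, int err)
	{
		struct my_result *res = req->data;

		/* First notification: the backlogged request has started. */
		if (err == -EINPROGRESS)
			return;

		/* Second notification: the final status is in. */
		res->err = err;
		complete(&res->completion);
	}
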
@@ -704,6 +708,7 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 	struct pd_uinfo *pd_uinfo = NULL;
 	unsigned int nbytes = datalen, idx;
 	u32 gd_idx = 0;
+	bool is_busy;
 
 	/* figure how many gd is needed */
 	num_gd = sg_nents_for_len(src, datalen);
@@ -734,6 +739,31 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 	 * already got must be return the original place.
 	 */
 	spin_lock_irqsave(&dev->core_dev->lock, flags);
+	/*
+	 * Let the caller know to slow down, once more than 13/16ths = 81%
+	 * of the available data contexts are being used simultaneously.
+	 *
+	 * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
+	 * 31 more contexts before new requests have to be rejected.
+	 */
+	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+			((PPC4XX_NUM_PD * 13) / 16);
+	} else {
+		/*
+		 * To fix contention issues between ipsec (no backlog) and
+		 * dm-crypt (backlog) traffic, reserve 32 entries for "no
+		 * backlog" data contexts.
+		 */
+		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+			((PPC4XX_NUM_PD * 15) / 16);
+
+		if (is_busy) {
+			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+			return -EBUSY;
+		}
+	}
+
 	if (num_gd) {
 		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
 		if (fst_gd == ERING_WAS_FULL) {
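
Worked numbers for the two thresholds, taking PPC4XX_NUM_PD = 256 as this driver
does: ring occupancy is the head/tail distance modulo the ring size, the
backlog-throttle threshold is 256 * 13 / 16 = 208 in-flight descriptors, and the
hard-reject threshold for requests without CRYPTO_TFM_REQ_MAY_BACKLOG is
256 * 15 / 16 = 240. A standalone sketch of the same arithmetic:

	#include <stdio.h>

	#define PPC4XX_NUM_PD 256

	int main(void)
	{
		unsigned int head = 250, tail = 30;	/* arbitrary ring positions */
		unsigned int in_use = (head - tail) % PPC4XX_NUM_PD;	/* 220 */

		printf("in use:            %u\n", in_use);
		printf("backlog threshold: %u\n", (PPC4XX_NUM_PD * 13) / 16); /* 208 */
		printf("reject threshold:  %u\n", (PPC4XX_NUM_PD * 15) / 16); /* 240 */

		/*
		 * 220 >= 208: a MAY_BACKLOG caller is marked busy and will get
		 * -EBUSY back even though its request is queued.
		 * 220 <  240: a caller without MAY_BACKLOG is still accepted.
		 */
		return 0;
	}
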
@@ -888,11 +918,12 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 	sa->sa_command_1.bf.hash_crypto_offset = 0;
 	pd->pd_ctl.w = ctx->pd_ctl;
 	pd->pd_ctl_len.w = 0x00400000 | datalen;
-	pd_uinfo->state = PD_ENTRY_INUSE;
+	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+
 	wmb();
 	/* write any value to push engine to read a pd */
 	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
-	return -EINPROGRESS;
+	return is_busy ? -EBUSY : -EINPROGRESS;
 }
 
 /**
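
Note the asymmetry in what -EBUSY means after this hunk: for a
CRYPTO_TFM_REQ_MAY_BACKLOG request it is returned after the descriptor has been
queued (accepted, but the caller should throttle), while for any other request
it is returned before queueing (rejected outright). A sketch of how a submitter
that sets MAY_BACKLOG typically reacts, reusing the hypothetical my_result
callback from above:

	int ret = crypto_ablkcipher_encrypt(req);

	switch (ret) {
	case 0:			/* completed synchronously */
		break;
	case -EINPROGRESS:	/* queued normally */
	case -EBUSY:		/* queued, but the PD ring is past the 13/16 mark */
		wait_for_completion(&res.completion);
		ret = res.err;
		break;
	default:		/* a real error; the request was not queued */
		break;
	}
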
@@ -997,7 +1028,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
 		tail = core_dev->dev->pdr_tail;
 		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
 		pd = &core_dev->dev->pdr[tail];
-		if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
+		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
 		    pd->pd_ctl.bf.pe_done &&
 		    !pd->pd_ctl.bf.host_ready) {
 			pd->pd_ctl.bf.pe_done = 0;