@@ -40,14 +40,32 @@ MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if over
 
 struct mv_cesa_dev *cesa_dev;
 
-static void mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine)
+struct crypto_async_request *
+mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
+			   struct crypto_async_request **backlog)
 {
-	struct crypto_async_request *req, *backlog;
-	struct mv_cesa_ctx *ctx;
+	struct crypto_async_request *req;
 
-	backlog = crypto_get_backlog(&engine->queue);
+	*backlog = crypto_get_backlog(&engine->queue);
 	req = crypto_dequeue_request(&engine->queue);
-	engine->req = req;
+
+	if (!req)
+		return NULL;
+
+	return req;
+}
+
+static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
+{
+	struct crypto_async_request *req = NULL, *backlog = NULL;
+	struct mv_cesa_ctx *ctx;
+
+	spin_lock_bh(&engine->lock);
+	if (!engine->req) {
+		req = mv_cesa_dequeue_req_locked(engine, &backlog);
+		engine->req = req;
+	}
+	spin_unlock_bh(&engine->lock);
 
 	if (!req)
 		return;
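
The hunk above splits the old dequeue-and-run logic in two: mv_cesa_dequeue_req_locked() now only pops a request, reporting any backlogged entry through an out parameter, while mv_cesa_rearm_engine() owns the locking and arms the hardware. Note the ordering the helper preserves: crypto_get_backlog() must be read before crypto_dequeue_request(), under the same engine lock, because dequeuing can promote the backlog entry into the main queue. The unchanged tail of mv_cesa_rearm_engine() then notifies the backlog owner and kicks the engine, roughly along these lines, quoted here as a sketch of the pre-existing logic rather than lines of this diff:

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
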
@@ -57,6 +75,44 @@ static void mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine)
 
 	ctx = crypto_tfm_ctx(req->tfm);
 	ctx->ops->step(req);
+}
+
+static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
+{
+	struct crypto_async_request *req;
+	struct mv_cesa_ctx *ctx;
+	int res;
+
+	req = engine->req;
+	ctx = crypto_tfm_ctx(req->tfm);
+	res = ctx->ops->process(req, status);
+
+	if (res == 0) {
+		ctx->ops->complete(req);
+		mv_cesa_engine_enqueue_complete_request(engine, req);
+	} else if (res == -EINPROGRESS) {
+		ctx->ops->step(req);
+	}
+
+	return res;
+}
+
+static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
+{
+	if (engine->chain.first && engine->chain.last)
+		return mv_cesa_tdma_process(engine, status);
+
+	return mv_cesa_std_process(engine, status);
+}
+
+static inline void
+mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
+		     int res)
+{
+	ctx->ops->cleanup(req);
+	local_bh_disable();
+	req->complete(req, res);
+	local_bh_enable();
+}
 
 static irqreturn_t mv_cesa_int(int irq, void *priv)
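
mv_cesa_std_process() is the old body of the interrupt handler turned into a helper: ->process() interprets the engine status; on success the request is parked on the engine's complete queue, and on -EINPROGRESS the next transfer of a multi-step request is started. mv_cesa_int_process() simply routes to the TDMA variant when a descriptor chain is active. Going by the calls made here, the per-request operation table being dispatched through (defined in the driver's cesa.h, not shown in this diff) presumably looks like:

	struct mv_cesa_req_ops {
		int (*process)(struct crypto_async_request *req, u32 status);
		void (*step)(struct crypto_async_request *req);
		void (*cleanup)(struct crypto_async_request *req);
		void (*complete)(struct crypto_async_request *req);
	};

mv_cesa_complete_req() brackets the final req->complete() call with local_bh_disable()/local_bh_enable(), preserving the old code's guarantee that crypto completion callbacks run with bottom halves disabled.
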
@@ -83,26 +139,32 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
 		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
 		writel(~status, engine->regs + CESA_SA_INT_STATUS);
 
+		/* Process fetched requests */
+		res = mv_cesa_int_process(engine, status & mask);
 		ret = IRQ_HANDLED;
+
 		spin_lock_bh(&engine->lock);
 		req = engine->req;
+		if (res != -EINPROGRESS)
+			engine->req = NULL;
 		spin_unlock_bh(&engine->lock);
-		if (req) {
-			ctx = crypto_tfm_ctx(req->tfm);
-			res = ctx->ops->process(req, status & mask);
-			if (res != -EINPROGRESS) {
-				spin_lock_bh(&engine->lock);
-				engine->req = NULL;
-				mv_cesa_dequeue_req_locked(engine);
-				spin_unlock_bh(&engine->lock);
-				ctx->ops->complete(req);
-				ctx->ops->cleanup(req);
-				local_bh_disable();
-				req->complete(req, res);
-				local_bh_enable();
-			} else {
-				ctx->ops->step(req);
-			}
+
+		ctx = crypto_tfm_ctx(req->tfm);
+
+		if (res && res != -EINPROGRESS)
+			mv_cesa_complete_req(ctx, req, res);
+
+		/* Launch the next pending request */
+		mv_cesa_rearm_engine(engine);
+
+		/* Iterate over the complete queue */
+		while (true) {
+			req = mv_cesa_engine_dequeue_complete_request(engine);
+			if (!req)
+				break;
+
+			ctx = crypto_tfm_ctx(req->tfm);
+			mv_cesa_complete_req(ctx, req, 0);
 		}
 	}
 
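
The interrupt handler now fetches the result once via mv_cesa_int_process(), completes a failed request inline, re-arms the engine as early as possible, and only then runs the completion callbacks of everything that finished, keeping the hardware busy while callbacks execute. Note that ctx is re-fetched for each dequeued request, since the complete queue can hold requests belonging to different transforms than the current engine->req. The complete-queue accessors used above live in cesa.h; from their use here and the INIT_LIST_HEAD() in the probe hunk below, the companion header change is presumably close to this sketch (the field and helper names are taken from the call sites, the bodies are inferred):

	static inline void
	mv_cesa_engine_enqueue_complete_request(struct mv_cesa_engine *engine,
						struct crypto_async_request *req)
	{
		list_add_tail(&req->list, &engine->complete_queue);
	}

	static inline struct crypto_async_request *
	mv_cesa_engine_dequeue_complete_request(struct mv_cesa_engine *engine)
	{
		struct crypto_async_request *req;

		req = list_first_entry_or_null(&engine->complete_queue,
					       struct crypto_async_request,
					       list);
		if (req)
			list_del(&req->list);

		return req;
	}

Both appear to run only from the engine's own interrupt path, which is what serializes the unlocked list manipulation.
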
@@ -116,16 +178,16 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
 	struct mv_cesa_engine *engine = creq->engine;
 
 	spin_lock_bh(&engine->lock);
+	if (mv_cesa_req_get_type(creq) == CESA_DMA_REQ)
+		mv_cesa_tdma_chain(engine, creq);
+
 	ret = crypto_enqueue_request(&engine->queue, req);
 	spin_unlock_bh(&engine->lock);
 
 	if (ret != -EINPROGRESS)
 		return ret;
 
-	spin_lock_bh(&engine->lock);
-	if (!engine->req)
-		mv_cesa_dequeue_req_locked(engine);
-	spin_unlock_bh(&engine->lock);
+	mv_cesa_rearm_engine(engine);
 
 	return -EINPROGRESS;
 }
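
Building the TDMA chain is moved to enqueue time: DMA-backed requests are appended to the engine-wide descriptor chain under the same lock that protects the software queue, which lets the engine execute back-to-back requests without a per-request round trip through the driver. The chain tested via engine->chain.first/last in mv_cesa_int_process() is presumably just a pair of descriptor pointers hanging off the engine, along the lines of:

	struct mv_cesa_tdma_chain {
		struct mv_cesa_tdma_desc *first;
		struct mv_cesa_tdma_desc *last;
	};

The tail of mv_cesa_queue_req() also shows the payoff of the earlier refactoring: the open-coded lock, check and dequeue sequence collapses into a single mv_cesa_rearm_engine() call.
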
@@ -496,6 +558,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
 
 		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
 		atomic_set(&engine->load, 0);
+		INIT_LIST_HEAD(&engine->complete_queue);
 	}
 
 	cesa_dev = cesa;
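
The single line added to mv_cesa_probe() is load-bearing: mv_cesa_engine_dequeue_complete_request() walks engine->complete_queue unconditionally on every interrupt, so the list head has to be initialised before the engine's IRQ is requested. The matching field addition in cesa.h is presumably a plain list head on the engine:

	struct mv_cesa_engine {
		/* ... existing fields: regs, lock, queue, req, load, chain, ... */
		struct list_head complete_queue;
	};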