@@ -115,6 +115,8 @@
 #define FLAGS_BE32_SHA1		8
 #define FLAGS_SGS_COPIED	9
 #define FLAGS_SGS_ALLOCED	10
+#define FLAGS_HUGE		11
+
 /* context flags */
 #define FLAGS_FINUP		16
 
@@ -139,6 +141,8 @@
 #define BUFLEN		SHA512_BLOCK_SIZE
 #define OMAP_SHA_DMA_THRESHOLD	256
 
+#define OMAP_SHA_MAX_DMA_LEN	(1024 * 2048)
+
 struct omap_sham_dev;
 
 struct omap_sham_reqctx {
@@ -692,21 +696,20 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
 
 	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
 
+	ctx->offset += new_len - ctx->bufcnt;
 	ctx->bufcnt = 0;
 
 	return 0;
 }
 
 static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
-			      struct scatterlist *sg, int bs, int new_len)
+			      struct scatterlist *sg, int bs,
+			      unsigned int new_len)
 {
 	int pages;
 	void *buf;
-	int len;
-
-	len = new_len + ctx->bufcnt;
 
-	pages = get_order(ctx->total);
+	pages = get_order(new_len);
 
 	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
 	if (!buf) {
@@ -718,14 +721,14 @@ static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
 		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
 
 	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
-				 ctx->total - ctx->bufcnt, 0);
+				 min(new_len, ctx->total) - ctx->bufcnt, 0);
 	sg_init_table(ctx->sgl, 1);
-	sg_set_buf(ctx->sgl, buf, len);
+	sg_set_buf(ctx->sgl, buf, new_len);
 	ctx->sg = ctx->sgl;
 	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
 	ctx->sg_len = 1;
+	ctx->offset += new_len - ctx->bufcnt;
 	ctx->bufcnt = 0;
-	ctx->offset = 0;
 
 	return 0;
 }
@@ -744,7 +747,7 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
 	if (!sg || !sg->length || !nbytes)
 		return 0;
 
-	new_len = nbytes;
+	new_len = nbytes - offset;
 
 	if (offset)
 		list_ok = false;
@@ -754,6 +757,9 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
 	else
 		new_len = (new_len - 1) / bs * bs;
 
+	if (!new_len)
+		return 0;
+
 	if (nbytes != new_len)
 		list_ok = false;
 
@@ -797,10 +803,17 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
 		}
 	}
 
+	if (new_len > OMAP_SHA_MAX_DMA_LEN) {
+		new_len = OMAP_SHA_MAX_DMA_LEN;
+		aligned = false;
+	}
+
 	if (!aligned)
 		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
 	else if (!list_ok)
 		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
+	else
+		rctx->offset += new_len;
 
 	rctx->sg_len = n;
 	rctx->sg = sg;
@@ -824,7 +837,12 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
 	else
 		nbytes = 0;
 
-	rctx->total = nbytes + rctx->bufcnt;
+	rctx->total = nbytes + rctx->bufcnt - rctx->offset;
+
+	dev_dbg(rctx->dd->dev,
+		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+		__func__, nbytes, bs, rctx->total, rctx->offset,
+		rctx->bufcnt);
 
 	if (!rctx->total)
 		return 0;
@@ -850,12 +868,15 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
 
 	xmit_len = rctx->total;
 
+	if (xmit_len > OMAP_SHA_MAX_DMA_LEN)
+		xmit_len = OMAP_SHA_MAX_DMA_LEN;
+
 	if (!IS_ALIGNED(xmit_len, bs)) {
 		if (final)
 			xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
 		else
 			xmit_len = xmit_len / bs * bs;
-	} else if (!final) {
+	} else if (!final && rctx->total == xmit_len) {
 		xmit_len -= bs;
 	}
 
@@ -883,7 +904,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
 		rctx->sg_len = 1;
 	}
 
-	if (hash_later) {
+	if (hash_later && hash_later <= rctx->buflen) {
 		int offset = 0;
 
 		if (hash_later > req->nbytes) {
@@ -904,6 +925,9 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
 		rctx->bufcnt = 0;
 	}
 
+	if (hash_later > rctx->buflen)
+		set_bit(FLAGS_HUGE, &rctx->dd->flags);
+
 	if (!final)
 		rctx->total = xmit_len;
 
@@ -1001,10 +1025,11 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
 	struct ahash_request *req = dd->req;
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	int err;
-	bool final = ctx->flags & BIT(FLAGS_FINUP);
+	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
+		     !(dd->flags & BIT(FLAGS_HUGE));
 
-	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
-		ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
+	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d",
+		ctx->total, ctx->digcnt, final);
 
 	if (ctx->total < get_block_size(ctx) ||
 	    ctx->total < dd->fallback_sz)
@@ -1027,6 +1052,9 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	int err = 0, use_dma = 1;
 
+	if (dd->flags & BIT(FLAGS_HUGE))
+		return 0;
+
 	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
 		/*
 		 * faster to handle last block with cpu or
@@ -1087,7 +1115,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
 
 	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
 		free_pages((unsigned long)sg_virt(ctx->sg),
-			   get_order(ctx->sg->length + ctx->bufcnt));
+			   get_order(ctx->sg->length));
 
 	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
 		kfree(ctx->sg);
@@ -1096,6 +1124,21 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
 
 	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
 
+	if (dd->flags & BIT(FLAGS_HUGE)) {
+		dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
+			       BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
+		omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
+		if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
+			err = omap_sham_update_req(dd);
+			if (err != -EINPROGRESS &&
+			    (ctx->flags & BIT(FLAGS_FINUP)))
+				err = omap_sham_final_req(dd);
+		} else if (ctx->op == OP_FINAL) {
+			omap_sham_final_req(dd);
+		}
+		return;
+	}
+
 	if (!err) {
 		dd->pdata->copy_hash(req, 1);
 		if (test_bit(FLAGS_FINAL, &dd->flags))
@@ -1111,6 +1154,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
 	pm_runtime_mark_last_busy(dd->dev);
 	pm_runtime_put_autosuspend(dd->dev);
 
+	ctx->offset = 0;
+
 	if (req->base.complete)
 		req->base.complete(&req->base, err);
 }
@@ -1162,7 +1207,7 @@ retry:
 		/* request has changed - restore hash */
 		dd->pdata->copy_hash(req, 0);
 
-	if (ctx->op == OP_UPDATE) {
+	if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
 		err = omap_sham_update_req(dd);
 		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
 			/* no final() after finup() */
@@ -1735,6 +1780,8 @@ static void omap_sham_done_task(unsigned long data)
 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 	int err = 0;
 
+	dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
+
 	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
 		omap_sham_handle_queue(dd, NULL);
 		return;