/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

#define CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}
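
/*
 * Note on job ids (illustrative, not part of the driver logic): on a
 * v3 device CCP_NEW_JOBID() hands out a fresh id per command, wrapping
 * under CCP_JOBID_MASK; e.g., assuming a six-bit mask, ids cycle
 * through 0..63 and then repeat. Non-v3 devices always use id 0.
 */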

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}
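
/*
 * Walk semantics above: the workarea only advances to the next sg
 * entry once the current entry is fully consumed, so callers must
 * clamp each update to the bytes remaining in the current entry
 * (ccp_queue_buf() and ccp_prepare_data() below both do this).
 */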

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (!wa->dma.address)
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}
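
/*
 * Allocation split above: requests up to CCP_DMAPOOL_MAX_SIZE come
 * from the queue's pre-mapped DMA pool (and are zeroed to the full
 * pool block size); anything larger falls back to kzalloc() plus
 * dma_map_single(). ccp_dm_free() keys off wa->length to undo
 * whichever path was taken.
 */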

static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;

	ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}
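
/*
 * The loop above reverses the copied bytes in place using the classic
 * XOR swap, which is safe here because p and q never alias while
 * p < q. For example, a big-endian operand {0x01, 0x02, 0x03, 0x04}
 * lands in the workarea as {0x04, 0x03, 0x02, 0x01}, i.e. in the
 * little endian layout the CCP expects.
 */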

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}

static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 *   nbytes will always be <= UINT_MAX because dm_wa->length is
	 *   an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}
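
/*
 * ccp_queue_buf() runs in two directions via the wrappers below:
 * "fill" (from == 0) gathers scattered source bytes into the
 * contiguous bounce buffer, while "empty" (from == 1) scatters
 * buffered result bytes back out. The returned count can be less
 * than dm_wa->length at the tail of a message.
 */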

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}

static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}
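
/*
 * Summary of the preparation above: when the current source fragment
 * is smaller than block_size the data is staged through the bounce
 * buffer and op->soc is set, stalling the queue on completion so the
 * buffer can be safely reused; otherwise the fragment is DMA'd
 * directly from the scatterlist with the length rounded down to a
 * whole number of blocks.
 */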

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}

static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}
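
/*
 * A sketch of how the helpers above are used (this mirrors the AES
 * routines below, loading a byte-swapped key into its storage block):
 *
 *	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 *			     CCP_PASSTHRU_BYTESWAP_256BIT);
 *	if (ret)
 *		goto e_key;
 */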

static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					aes->cmac_key_len);
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
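
/*
 * CMAC note: for the final block the caller supplies the appropriate
 * subkey (K1 or K2 in the usual CMAC construction) in cmac_key; the
 * loop above drains the intermediate context out of the SB entry and
 * swaps the subkey in, but only on the very last pass.
 */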

static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len != AES_KEYSIZE_128)
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
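
/*
 * XTS key layout note: the caller passes the two 128-bit key halves
 * concatenated in xts->key, and the pair of ccp_set_dm_area() calls
 * above exchanges the halves within the 32-byte SB entry, which is
 * presumably the ordering the engine expects.
 */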

static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}
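
/*
 * HMAC wrap-up above follows the standard construction: the outer
 * hash is SHA(opad || inner_digest), computed by the recursive call
 * with first == final == 1 and msg_bits covering the whole outer
 * block, e.g. for SHA-256: (64 + 32) * 8 = 768 bits.
 */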

static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src;
	struct ccp_data dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits).
	 */
	o_len = ((rsa->key_size + 255) / 256) * 32;
	i_len = o_len * 2;

	sb_count = o_len / CCP_SB_BYTES;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);

	if (!op.sb_key)
		return -EIO;

	/* The RSA exponent may span multiple (32-byte) SB entries and must
	 * be in little endian format. Reverse copy each 32-byte chunk
	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
	 * and each byte within that chunk and do not perform any byte swap
	 * operations on the passthru operation.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
	if (ret)
		goto e_exp;
	ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_exp;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
			    o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dm_wa.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_free_data(&dst, cmd_q);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}
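
/*
 * Buffer sizing example for the function above, assuming a 2048-bit
 * key: o_len = ((2048 + 255) / 256) * 32 = 256 bytes (the key width
 * rounded up to a 256-bit multiple), i_len = 2 * 256 = 512 bytes for
 * the concatenated modulus and message, and sb_count = 256 / 32 = 8
 * storage-block entries for the exponent.
 */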

static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 *   Because the CCP engine works on a single source and destination
	 *   dma address at a time, each entry in the source scatterlist
	 *   (after the dma_map_sg call) must be less than or equal to the
	 *   (remaining) length in the destination scatterlist entry and the
	 *   length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}

static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
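
/*
 * A minimal usage sketch (illustrative only; in the real driver,
 * commands normally arrive through ccp_enqueue_cmd() rather than by
 * calling ccp_run_cmd() directly):
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_SHA;
 *	cmd.u.sha.type = CCP_SHA_TYPE_256;
 *	// ...fill in src/ctx scatterlists, lengths, first/final flags...
 *
 *	ret = ccp_run_cmd(cmd_q, &cmd);
 *	if (ret)
 *		dev_err(dev, "SHA failed: %d (engine error %u)\n",
 *			ret, cmd.engine_error);
 */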