/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

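/* Generate a fresh job ID only on version 3 devices; later versions use 0. */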
#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (!wa->dma.address)
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}

static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;

	ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);

	p = wa->address + wa_offset;
	q = p + len - 1;
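
	/* Reverse the buffer contents in place using XOR swaps. */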
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}

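/* Copy between the scatterlist and the local bounce buffer: a zero 'from'
 * fills the buffer from the scatterlist, a non-zero 'from' empties it back
 * out. Returns the number of scatterlist bytes accounted for.
 */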
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 *   nbytes will always be <= UINT_MAX because dm_wa->length is
	 *   an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}

static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}

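/* Account for a completed operation: clear the init flag for any follow-on
 * operation and advance the destination, draining the bounce buffer back to
 * the destination scatterlist if it was used.
 */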
static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}

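/* Use a passthru operation to copy a workarea to or from a storage block
 * (SB) entry, applying the requested byte swap.
 */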
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					aes->cmac_key_len);
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
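
	/* For CTR and CFB the size field is set to the AES block size in
	 * bits, less one (127 for a full block); other modes leave it zero.
	 */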
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;
	default:
		return -EINVAL;
	}

	if (xts->key_len != AES_KEYSIZE_128)
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;
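
	/* Copy the two 128-bit halves of the XTS key, swapping their order
	 * within the workarea.
	 */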
	dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}
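
		/* Build the outer HMAC message: opad followed by the
		 * intermediate digest.
		 */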
		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}

static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src;
	struct ccp_data dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits).
	 */
	o_len = ((rsa->key_size + 255) / 256) * 32;
	i_len = o_len * 2;

	sb_count = o_len / CCP_SB_BYTES;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);

	if (!op.sb_key)
		return -EIO;

	/* The RSA exponent may span multiple (32-byte) SB entries and must
	 * be in little endian format. Reverse copy each 32-byte chunk
	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
	 * and each byte within that chunk and do not perform any byte swap
	 * operations on the passthru operation.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
	if (ret)
		goto e_exp;
	ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_exp;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
			    o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dm_wa.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_free_data(&dst, cmd_q);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}

static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 *   Because the CCP engine works on a single source and destination
	 *   dma address at a time, each entry in the source scatterlist
	 *   (after the dma_map_sg call) must be less than or equal to the
	 *   (remaining) length in the destination scatterlist entry and the
	 *   length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}

static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
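		/* The mask is already DMA mapped by the caller, so wrap it
		 * in a minimal workarea for the copy to the storage block.
		 */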
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

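/* Route an ECC command to the modular math or point math handler. */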
static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}