picoxcell_crypto.c

/*
 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "picoxcell_crypto_regs.h"

/*
 * The threshold for the number of entries in the CMD FIFO available before
 * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
 * number of interrupts raised to the CPU.
 */
#define CMD0_IRQ_THRESHOLD	1

/*
 * The timeout period (in jiffies) for a PDU. When the number of PDUs in
 * flight is greater than the STAT_IRQ_THRESHOLD or 0 the timer is disabled.
 * When there are packets in flight but lower than the threshold, we enable
 * the timer and at expiry, attempt to remove any processed packets from the
 * queue and if there are still packets left, schedule the timer again.
 */
#define PACKET_TIMEOUT		1

/* The priority to register each algorithm with. */
#define SPACC_CRYPTO_ALG_PRIORITY	10000

#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN	16
#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ	64
#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ	64
#define SPACC_CRYPTO_IPSEC_MAX_CTXS	32
#define SPACC_CRYPTO_IPSEC_FIFO_SZ	32
#define SPACC_CRYPTO_L2_CIPHER_PG_SZ	64
#define SPACC_CRYPTO_L2_HASH_PG_SZ	64
#define SPACC_CRYPTO_L2_MAX_CTXS	128
#define SPACC_CRYPTO_L2_FIFO_SZ		128

#define MAX_DDT_LEN			16

/* DDT format. This must match the hardware DDT format exactly. */
struct spacc_ddt {
	dma_addr_t	p;
	u32		len;
};
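
/*
 * An illustrative layout sketch (not taken from the hardware databook):
 * the engine walks a DDT as a flat array of {address, length} pairs and
 * stops at a {0, 0} terminator (see the ddt_set() users below), so a
 * chain describing two mapped buffers would look like:
 *
 *	ddt[0] = { buf0_dma, 64 };
 *	ddt[1] = { buf1_dma, 128 };
 *	ddt[2] = { 0, 0 };		terminating entry
 */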

/*
 * Asynchronous crypto request structure.
 *
 * This structure defines a request that is either queued for processing or
 * being processed.
 */
struct spacc_req {
	struct list_head		list;
	struct spacc_engine		*engine;
	struct crypto_async_request	*req;
	int				result;
	bool				is_encrypt;
	unsigned			ctx_id;
	dma_addr_t			src_addr, dst_addr;
	struct spacc_ddt		*src_ddt, *dst_ddt;
	void				(*complete)(struct spacc_req *req);

	/* AEAD specific bits. */
	u8				*giv;
	size_t				giv_len;
	dma_addr_t			giv_pa;
};

struct spacc_engine {
	void __iomem		*regs;
	struct list_head	pending;
	int			next_ctx;
	spinlock_t		hw_lock;
	int			in_flight;
	struct list_head	completed;
	struct list_head	in_progress;
	struct tasklet_struct	complete;
	unsigned long		fifo_sz;
	void __iomem		*cipher_ctx_base;
	void __iomem		*hash_key_base;
	struct spacc_alg	*algs;
	unsigned		num_algs;
	struct list_head	registered_algs;
	size_t			cipher_pg_sz;
	size_t			hash_pg_sz;
	const char		*name;
	struct clk		*clk;
	struct device		*dev;
	unsigned		max_ctxs;
	struct timer_list	packet_timeout;
	unsigned		stat_irq_thresh;
	struct dma_pool		*req_pool;
};

/* Algorithm type mask. */
#define SPACC_CRYPTO_ALG_MASK		0x7

/* SPACC definition of a crypto algorithm. */
struct spacc_alg {
	unsigned long		ctrl_default;
	unsigned long		type;
	struct crypto_alg	alg;
	struct spacc_engine	*engine;
	struct list_head	entry;
	int			key_offs;
	int			iv_offs;
};

/* Generic context structure for any algorithm type. */
struct spacc_generic_ctx {
	struct spacc_engine	*engine;
	int			flags;
	int			key_offs;
	int			iv_offs;
};

/* Block cipher context. */
struct spacc_ablk_ctx {
	struct spacc_generic_ctx	generic;
	u8				key[AES_MAX_KEY_SIZE];
	u8				key_len;

	/*
	 * The fallback cipher. If the operation can't be done in hardware,
	 * fallback to a software version.
	 */
	struct crypto_ablkcipher	*sw_cipher;
};

/* AEAD cipher context. */
struct spacc_aead_ctx {
	struct spacc_generic_ctx	generic;
	u8				cipher_key[AES_MAX_KEY_SIZE];
	u8				hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
	u8				cipher_key_len;
	u8				hash_key_len;
	struct crypto_aead		*sw_cipher;
	size_t				auth_size;
	u8				salt[AES_BLOCK_SIZE];
};

static int spacc_ablk_submit(struct spacc_req *req);

static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
{
	return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}

static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
{
	u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);

	return fifo_stat & SPA_FIFO_CMD_FULL;
}

/*
 * Given a cipher context, and a context number, get the base address of the
 * context page.
 *
 * Returns the address of the context page where the key/context may
 * be written.
 */
static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
						unsigned indx,
						bool is_cipher_ctx)
{
	return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
			(indx * ctx->engine->cipher_pg_sz) :
		ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
}

/* The context pages can only be written with 32-bit accesses. */
static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
				 unsigned count)
{
	const u32 *src32 = (const u32 *) src;

	while (count--)
		writel(*src32++, dst++);
}
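
/*
 * Note that the count is in 32-bit words rather than bytes, which is
 * why callers such as spacc_cipher_write_ctx() below pass key_len / 4.
 */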

static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
				   void __iomem *page_addr, const u8 *key,
				   size_t key_len, const u8 *iv, size_t iv_len)
{
	void __iomem *key_ptr = page_addr + ctx->key_offs;
	void __iomem *iv_ptr = page_addr + ctx->iv_offs;

	memcpy_toio32(key_ptr, key, key_len / 4);
	memcpy_toio32(iv_ptr, iv, iv_len / 4);
}

/*
 * Load a context into the engine's context memory.
 *
 * Returns the index of the context page where the context was loaded.
 */
static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
			       const u8 *ciph_key, size_t ciph_len,
			       const u8 *iv, size_t ivlen, const u8 *hash_key,
			       size_t hash_len)
{
	unsigned indx = ctx->engine->next_ctx++;
	void __iomem *ciph_page_addr, *hash_page_addr;

	ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
	hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);

	ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
	spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
			       ivlen);
	writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
	       (1 << SPA_KEY_SZ_CIPHER_OFFSET),
	       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);

	if (hash_key) {
		memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
		writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
		       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
	}

	return indx;
}
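
/*
 * The next_ctx wrap above relies on fifo_sz being a power of two (the
 * FIFO sizes defined in this file are 32 and 128), so the AND acts as
 * a cheap modulo. Equivalent arithmetic, for illustration only:
 *
 *	indx = next_ctx;
 *	next_ctx = (next_ctx + 1) % fifo_sz;
 */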

/* Count the number of scatterlist entries in a scatterlist. */
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes > 0) {
		++sg_nents;
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return sg_nents;
}
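
/*
 * sg_count() reports the entries needed to cover nbytes before DMA
 * mapping; dma_map_sg() may coalesce entries, so callers build DDTs
 * from the mapped count it returns and keep this value for unmapping.
 */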

static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
{
	ddt->p = phys;
	ddt->len = len;
}

/*
 * Take a crypto request and scatterlists for the data and turn them into DDTs
 * for passing to the crypto engines. This also DMA maps the data so that the
 * crypto engines can DMA to/from them.
 */
static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
					 struct scatterlist *payload,
					 unsigned nbytes,
					 enum dma_data_direction dir,
					 dma_addr_t *ddt_phys)
{
	unsigned nents, mapped_ents;
	struct scatterlist *cur;
	struct spacc_ddt *ddt;
	int i;

	nents = sg_count(payload, nbytes);
	mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);

	if (mapped_ents + 1 > MAX_DDT_LEN)
		goto out;

	ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
	if (!ddt)
		goto out;

	for_each_sg(payload, cur, mapped_ents, i)
		ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
	ddt_set(&ddt[mapped_ents], 0, 0);

	return ddt;

out:
	dma_unmap_sg(engine->dev, payload, nents, dir);
	return NULL;
}
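
/*
 * Minimal usage sketch (hypothetical request, error handling elided):
 * a DDT returned here is paired with spacc_free_ddt() below, which
 * undoes both the dma_map_sg() and the dma_pool_alloc():
 *
 *	ddt = spacc_sg_to_ddt(engine, req->src, req->nbytes,
 *			      DMA_TO_DEVICE, &dev_req->src_addr);
 *	...
 *	spacc_free_ddt(dev_req, ddt, dev_req->src_addr, req->src,
 *		       req->nbytes, DMA_TO_DEVICE);
 */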

static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
{
	struct aead_request *areq = container_of(req->req, struct aead_request,
						 base);
	struct spacc_engine *engine = req->engine;
	struct spacc_ddt *src_ddt, *dst_ddt;
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
	unsigned nents = sg_count(areq->src, areq->cryptlen);
	dma_addr_t iv_addr;
	struct scatterlist *cur;
	int i, dst_ents, src_ents, assoc_ents;
	u8 *iv = giv ? giv : areq->iv;

	src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
	if (!src_ddt)
		return -ENOMEM;

	dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
	if (!dst_ddt) {
		dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
		return -ENOMEM;
	}

	req->src_ddt = src_ddt;
	req->dst_ddt = dst_ddt;

	assoc_ents = dma_map_sg(engine->dev, areq->assoc,
		sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
	if (areq->src != areq->dst) {
		src_ents = dma_map_sg(engine->dev, areq->src, nents,
				      DMA_TO_DEVICE);
		dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
				      DMA_FROM_DEVICE);
	} else {
		src_ents = dma_map_sg(engine->dev, areq->src, nents,
				      DMA_BIDIRECTIONAL);
		dst_ents = 0;
	}

	/*
	 * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
	 * formed by the crypto block and sent as the ESP IV for IPSEC.
	 */
	iv_addr = dma_map_single(engine->dev, iv, ivsize,
				 giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	req->giv_pa = iv_addr;

	/*
	 * Map the associated data. For decryption we don't copy the
	 * associated data.
	 */
	for_each_sg(areq->assoc, cur, assoc_ents, i) {
		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
		if (req->is_encrypt)
			ddt_set(dst_ddt++, sg_dma_address(cur),
				sg_dma_len(cur));
	}
	ddt_set(src_ddt++, iv_addr, ivsize);

	if (giv || req->is_encrypt)
		ddt_set(dst_ddt++, iv_addr, ivsize);

	/*
	 * Now map in the payload for the source and destination and terminate
	 * with the NULL pointers.
	 */
	for_each_sg(areq->src, cur, src_ents, i) {
		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
		if (areq->src == areq->dst)
			ddt_set(dst_ddt++, sg_dma_address(cur),
				sg_dma_len(cur));
	}
	for_each_sg(areq->dst, cur, dst_ents, i)
		ddt_set(dst_ddt++, sg_dma_address(cur),
			sg_dma_len(cur));

	ddt_set(src_ddt, 0, 0);
	ddt_set(dst_ddt, 0, 0);

	return 0;
}

static void spacc_aead_free_ddts(struct spacc_req *req)
{
	struct aead_request *areq = container_of(req->req, struct aead_request,
						 base);
	struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
	struct spacc_aead_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
	struct spacc_engine *engine = aead_ctx->generic.engine;
	unsigned ivsize = alg->alg.cra_aead.ivsize;
	unsigned nents = sg_count(areq->src, areq->cryptlen);

	if (areq->src != areq->dst) {
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
		dma_unmap_sg(engine->dev, areq->dst,
			     sg_count(areq->dst, areq->cryptlen),
			     DMA_FROM_DEVICE);
	} else
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);

	dma_unmap_sg(engine->dev, areq->assoc,
		     sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);

	dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);

	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}

static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
			   dma_addr_t ddt_addr, struct scatterlist *payload,
			   unsigned nbytes, enum dma_data_direction dir)
{
	unsigned nents = sg_count(payload, nbytes);

	dma_unmap_sg(req->engine->dev, payload, nents, dir);
	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
}

/*
 * Set key for a DES operation in an AEAD cipher. This also performs weak key
 * checking if required.
 */
static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];

	if (unlikely(!des_ekey(tmp, key)) &&
	    (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->cipher_key, key, len);
	ctx->cipher_key_len = len;

	return 0;
}

/* Set the key for the AES block cipher component of the AEAD transform. */
static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	/*
	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
	 * request for any other size (192 bits) then we need to do a software
	 * fallback.
	 */
	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
		/*
		 * Set the fallback transform to use the same request flags as
		 * the hardware transform.
		 */
		ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
		ctx->sw_cipher->base.crt_flags |=
			tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
		return crypto_aead_setkey(ctx->sw_cipher, key, len);
	}

	memcpy(ctx->cipher_key, key, len);
	ctx->cipher_key_len = len;

	return 0;
}

static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
	struct crypto_authenc_keys keys;
	int err = -EINVAL;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.enckeylen > AES_MAX_KEY_SIZE)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->hash_ctx))
		goto badkey;

	if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
	    SPA_CTRL_CIPH_ALG_AES)
		err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
	else
		err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);

	if (err)
		goto badkey;

	memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
	ctx->hash_key_len = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int spacc_aead_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));

	ctx->auth_size = authsize;

	return 0;
}

/*
 * Check if an AEAD request requires a fallback operation. Some requests can't
 * be completed in hardware because the hardware may not support certain key
 * sizes. In these cases we need to complete the request in software.
 */
static int spacc_aead_need_fallback(struct spacc_req *req)
{
	struct aead_request *aead_req;
	struct crypto_tfm *tfm = req->req->tfm;
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	aead_req = container_of(req->req, struct aead_request, base);

	/*
	 * If we have a non-supported key-length, then we need to do a
	 * software fallback.
	 */
	if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
	    SPA_CTRL_CIPH_ALG_AES &&
	    ctx->cipher_key_len != AES_KEYSIZE_128 &&
	    ctx->cipher_key_len != AES_KEYSIZE_256)
		return 1;

	return 0;
}

static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
				  bool is_encrypt)
{
	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
	int err;

	if (ctx->sw_cipher) {
		/*
		 * Change the request to use the software fallback transform,
		 * and once the ciphering has completed, put the old transform
		 * back into the request.
		 */
		aead_request_set_tfm(req, ctx->sw_cipher);
		err = is_encrypt ? crypto_aead_encrypt(req) :
		    crypto_aead_decrypt(req);
		aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
	} else
		err = -EINVAL;

	return err;
}

static void spacc_aead_complete(struct spacc_req *req)
{
	spacc_aead_free_ddts(req);
	req->req->complete(req->req, req->result);
}

static int spacc_aead_submit(struct spacc_req *req)
{
	struct crypto_tfm *tfm = req->req->tfm;
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl, proc_len, assoc_len;
	struct aead_request *aead_req =
		container_of(req->req, struct aead_request, base);

	req->result = -EINPROGRESS;
	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
		ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
		ctx->hash_ctx, ctx->hash_key_len);

	/* Set the source and destination DDT pointers. */
	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	assoc_len = aead_req->assoclen;
	proc_len = aead_req->cryptlen + assoc_len;

	/*
	 * If we aren't generating an IV, then we need to include the IV in the
	 * associated data so that it is included in the hash.
	 */
	if (!req->giv) {
		assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
		proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
	} else
		proc_len += req->giv_len;

	/*
	 * If we are decrypting, we need to take the length of the ICV out of
	 * the processing length.
	 */
	if (!req->is_encrypt)
		proc_len -= ctx->auth_size;

	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
	writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);

	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
		(1 << SPA_CTRL_ICV_APPEND);
	if (req->is_encrypt)
		ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
	else
		ctrl |= (1 << SPA_CTRL_KEY_EXP);

	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}

static int spacc_req_submit(struct spacc_req *req);

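/* Drain the pending list into the engine; called with hw_lock held. */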
static void spacc_push(struct spacc_engine *engine)
{
	struct spacc_req *req;

	while (!list_empty(&engine->pending) &&
	       engine->in_flight + 1 <= engine->fifo_sz) {
		++engine->in_flight;
		req = list_first_entry(&engine->pending, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->in_progress);

		req->result = spacc_req_submit(req);
	}
}

/*
 * Setup an AEAD request for processing. This will configure the engine, load
 * the context and then start the packet processing.
 *
 * @giv	Pointer to destination address for a generated IV. If the
 *	request does not need to generate an IV then this should be set to NULL.
 */
static int spacc_aead_setup(struct aead_request *req, u8 *giv,
			    unsigned alg_type, bool is_encrypt)
{
	struct crypto_alg *alg = req->base.tfm->__crt_alg;
	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
	struct spacc_req *dev_req = aead_request_ctx(req);
	int err = -EINPROGRESS;
	unsigned long flags;
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

	dev_req->giv = giv;
	dev_req->giv_len = ivsize;
	dev_req->req = &req->base;
	dev_req->is_encrypt = is_encrypt;
	dev_req->result = -EBUSY;
	dev_req->engine = engine;
	dev_req->complete = spacc_aead_complete;

	if (unlikely(spacc_aead_need_fallback(dev_req)))
		return spacc_aead_do_fallback(req, alg_type, is_encrypt);

	err = spacc_aead_make_ddts(dev_req, dev_req->giv);
	if (err)
		goto out;

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_aead_free_ddts(dev_req);
out:
	return err;
}

static int spacc_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_aead_setup(req, NULL, alg->type, 1);
}

static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	size_t ivsize = crypto_aead_ivsize(tfm);
	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
	unsigned len;
	__be64 seq;

	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);

	return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
}

static int spacc_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_aead_setup(req, NULL, alg->type, 0);
}

/*
 * Initialise a new AEAD context. This is responsible for allocating the
 * fallback cipher and initialising the context.
 */
static int spacc_aead_cra_init(struct crypto_tfm *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
					   CRYPTO_ALG_ASYNC |
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->sw_cipher)) {
		dev_warn(engine->dev, "failed to allocate fallback for %s\n",
			 alg->cra_name);
		ctx->sw_cipher = NULL;
	}
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	get_random_bytes(ctx->salt, sizeof(ctx->salt));

	tfm->crt_aead.reqsize = sizeof(struct spacc_req);

	return 0;
}

/*
 * Destructor for an AEAD context. This is called when the transform is freed
 * and must free the fallback cipher.
 */
static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sw_cipher)
		crypto_free_aead(ctx->sw_cipher);
	ctx->sw_cipher = NULL;
}

/*
 * Set the DES key for a block cipher transform. This also performs weak key
 * checking if the transform has requested it.
 */
static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			    unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];

	if (len > DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(!des_ekey(tmp, key)) &&
	    (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

	return 0;
}

/*
 * Set the key for an AES block cipher. Some key lengths are not supported in
 * hardware so this must also check whether a fallback is needed.
 */
static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			    unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = 0;

	if (len > AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/*
	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
	 * request for any other size (192 bits) then we need to do a software
	 * fallback.
	 */
	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
	    ctx->sw_cipher) {
		/*
		 * Set the fallback transform to use the same request flags as
		 * the hardware transform.
		 */
		ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
		ctx->sw_cipher->base.crt_flags |=
			cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;

		err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
		if (err)
			goto sw_setkey_failed;
	} else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
		   !ctx->sw_cipher)
		err = -EINVAL;

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

sw_setkey_failed:
	if (err && ctx->sw_cipher) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |=
			ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
	}

	return err;
}

static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
				  const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = 0;

	if (len > AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		err = -EINVAL;
		goto out;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

out:
	return err;
}

static int spacc_ablk_need_fallback(struct spacc_req *req)
{
	struct spacc_ablk_ctx *ctx;
	struct crypto_tfm *tfm = req->req->tfm;
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);

	ctx = crypto_tfm_ctx(tfm);

	return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
			SPA_CTRL_CIPH_ALG_AES &&
			ctx->key_len != AES_KEYSIZE_128 &&
			ctx->key_len != AES_KEYSIZE_256;
}

static void spacc_ablk_complete(struct spacc_req *req)
{
	struct ablkcipher_request *ablk_req =
		container_of(req->req, struct ablkcipher_request, base);

	if (ablk_req->src != ablk_req->dst) {
		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
			       ablk_req->nbytes, DMA_TO_DEVICE);
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->nbytes, DMA_FROM_DEVICE);
	} else
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->nbytes, DMA_BIDIRECTIONAL);

	req->req->complete(req->req, req->result);
}

static int spacc_ablk_submit(struct spacc_req *req)
{
	struct crypto_tfm *tfm = req->req->tfm;
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl;

	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
		ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
		NULL, 0);

	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
	writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);

	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
		(req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
		 (1 << SPA_CTRL_KEY_EXP));

	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}

static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
				  unsigned alg_type, bool is_encrypt)
{
	struct crypto_tfm *old_tfm =
	    crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
	int err;

	if (!ctx->sw_cipher)
		return -EINVAL;

	/*
	 * Change the request to use the software fallback transform, and once
	 * the ciphering has completed, put the old transform back into the
	 * request.
	 */
	ablkcipher_request_set_tfm(req, ctx->sw_cipher);
	err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
		crypto_ablkcipher_decrypt(req);
	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));

	return err;
}

static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
			    bool is_encrypt)
{
	struct crypto_alg *alg = req->base.tfm->__crt_alg;
	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
	struct spacc_req *dev_req = ablkcipher_request_ctx(req);
	unsigned long flags;
	int err = -ENOMEM;

	dev_req->req = &req->base;
	dev_req->is_encrypt = is_encrypt;
	dev_req->engine = engine;
	dev_req->complete = spacc_ablk_complete;
	dev_req->result = -EINPROGRESS;

	if (unlikely(spacc_ablk_need_fallback(dev_req)))
		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);

	/*
	 * Create the DDT's for the engine. If we share the same source and
	 * destination then we can optimize by reusing the DDT's.
	 */
	if (req->src != req->dst) {
		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
			req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
		if (!dev_req->src_ddt)
			goto out;

		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out_free_src;
	} else {
		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out;

		dev_req->src_ddt = NULL;
		dev_req->src_addr = dev_req->dst_addr;
	}

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);

	/*
	 * Check if the engine will accept the operation now. If it won't then
	 * we either stick it on the end of a pending list if we can backlog,
	 * or bailout with an error if not.
	 */
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
		       req->nbytes, req->src == req->dst ?
		       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
	if (req->src != req->dst)
		spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
			       req->src, req->nbytes, DMA_TO_DEVICE);
out:
	return err;
}

static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->sw_cipher)) {
			dev_warn(engine->dev, "failed to allocate fallback for %s\n",
				 alg->cra_name);
			ctx->sw_cipher = NULL;
		}
	}
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);

	return 0;
}

static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sw_cipher)
		crypto_free_ablkcipher(ctx->sw_cipher);
	ctx->sw_cipher = NULL;
}

static int spacc_ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_ablk_setup(req, alg->type, 1);
}

static int spacc_ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_ablk_setup(req, alg->type, 0);
}

static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
{
	return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
		SPA_FIFO_STAT_EMPTY;
}

static void spacc_process_done(struct spacc_engine *engine)
{
	struct spacc_req *req;
	unsigned long flags;

	spin_lock_irqsave(&engine->hw_lock, flags);

	while (!spacc_fifo_stat_empty(engine)) {
		req = list_first_entry(&engine->in_progress, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->completed);
		--engine->in_flight;

		/* POP the status register. */
		writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
			SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;

		/*
		 * Convert the SPAcc error status into the standard POSIX error
		 * codes.
		 */
		if (unlikely(req->result)) {
			switch (req->result) {
			case SPA_STATUS_ICV_FAIL:
				req->result = -EBADMSG;
				break;

			case SPA_STATUS_MEMORY_ERROR:
				dev_warn(engine->dev,
					 "memory error triggered\n");
				req->result = -EFAULT;
				break;

			case SPA_STATUS_BLOCK_ERROR:
				dev_warn(engine->dev,
					 "block error triggered\n");
				req->result = -EIO;
				break;
			}
		}
	}

	tasklet_schedule(&engine->complete);

	spin_unlock_irqrestore(&engine->hw_lock, flags);
}

static irqreturn_t spacc_spacc_irq(int irq, void *dev)
{
	struct spacc_engine *engine = (struct spacc_engine *)dev;
	u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);

	writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
	spacc_process_done(engine);

	return IRQ_HANDLED;
}

static void spacc_packet_timeout(unsigned long data)
{
	struct spacc_engine *engine = (struct spacc_engine *)data;

	spacc_process_done(engine);
}

static int spacc_req_submit(struct spacc_req *req)
{
	struct crypto_alg *alg = req->req->tfm->__crt_alg;

	if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
		return spacc_aead_submit(req);
	else
		return spacc_ablk_submit(req);
}

static void spacc_spacc_complete(unsigned long data)
{
	struct spacc_engine *engine = (struct spacc_engine *)data;
	struct spacc_req *req, *tmp;
	unsigned long flags;
	LIST_HEAD(completed);

	spin_lock_irqsave(&engine->hw_lock, flags);

	list_splice_init(&engine->completed, &completed);
	spacc_push(engine);
	if (engine->in_flight)
		mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	spin_unlock_irqrestore(&engine->hw_lock, flags);

	list_for_each_entry_safe(req, tmp, &completed, list) {
		list_del(&req->list);
		req->complete(req);
	}
}

#ifdef CONFIG_PM
static int spacc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	/*
	 * We only support standby mode. All we have to do is gate the clock to
	 * the spacc. The hardware will preserve state until we turn it back
	 * on again.
	 */
	clk_disable(engine->clk);

	return 0;
}

static int spacc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	return clk_enable(engine->clk);
}

static const struct dev_pm_ops spacc_pm_ops = {
	.suspend	= spacc_suspend,
	.resume		= spacc_resume,
};
#endif /* CONFIG_PM */

static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
	return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
}

static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
}

static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);
	unsigned long thresh;

	if (kstrtoul(buf, 0, &thresh))
		return -EINVAL;

	thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);

	engine->stat_irq_thresh = thresh;
	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);

	return len;
}

static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
		   spacc_stat_irq_thresh_store);
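
/*
 * The attribute above is exposed through sysfs under the platform
 * device's directory. Assuming a device named "picoxcell-spacc"
 * (a hypothetical name, for illustration only), the threshold could
 * be tuned at runtime with something like:
 *
 *	echo 8 > /sys/devices/platform/picoxcell-spacc/stat_irq_thresh
 */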

static struct spacc_alg ipsec_engine_algs[] = {
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_aes_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_aes_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-des3-ede-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-des3-ede-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
  1247. {
  1248. .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
  1249. SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
  1250. .key_offs = 0,
  1251. .iv_offs = AES_MAX_KEY_SIZE,
  1252. .alg = {
  1253. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  1254. .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
  1255. .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
  1256. .cra_flags = CRYPTO_ALG_TYPE_AEAD |
  1257. CRYPTO_ALG_ASYNC |
  1258. CRYPTO_ALG_KERN_DRIVER_ONLY,
  1259. .cra_blocksize = AES_BLOCK_SIZE,
  1260. .cra_ctxsize = sizeof(struct spacc_aead_ctx),
  1261. .cra_type = &crypto_aead_type,
  1262. .cra_module = THIS_MODULE,
  1263. .cra_aead = {
  1264. .setkey = spacc_aead_setkey,
  1265. .setauthsize = spacc_aead_setauthsize,
  1266. .encrypt = spacc_aead_encrypt,
  1267. .decrypt = spacc_aead_decrypt,
  1268. .givencrypt = spacc_aead_givencrypt,
  1269. .ivsize = AES_BLOCK_SIZE,
  1270. .maxauthsize = SHA1_DIGEST_SIZE,
  1271. },
  1272. .cra_init = spacc_aead_cra_init,
  1273. .cra_exit = spacc_aead_cra_exit,
  1274. },
  1275. },
  1276. {
  1277. .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
  1278. SPA_CTRL_HASH_ALG_SHA256 |
  1279. SPA_CTRL_HASH_MODE_HMAC,
  1280. .key_offs = 0,
  1281. .iv_offs = AES_MAX_KEY_SIZE,
  1282. .alg = {
  1283. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  1284. .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
  1285. .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
  1286. .cra_flags = CRYPTO_ALG_TYPE_AEAD |
  1287. CRYPTO_ALG_ASYNC |
  1288. CRYPTO_ALG_KERN_DRIVER_ONLY,
  1289. .cra_blocksize = AES_BLOCK_SIZE,
  1290. .cra_ctxsize = sizeof(struct spacc_aead_ctx),
  1291. .cra_type = &crypto_aead_type,
  1292. .cra_module = THIS_MODULE,
  1293. .cra_aead = {
  1294. .setkey = spacc_aead_setkey,
  1295. .setauthsize = spacc_aead_setauthsize,
  1296. .encrypt = spacc_aead_encrypt,
  1297. .decrypt = spacc_aead_decrypt,
  1298. .givencrypt = spacc_aead_givencrypt,
  1299. .ivsize = AES_BLOCK_SIZE,
  1300. .maxauthsize = SHA256_DIGEST_SIZE,
  1301. },
  1302. .cra_init = spacc_aead_cra_init,
  1303. .cra_exit = spacc_aead_cra_exit,
  1304. },
  1305. },
	{
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		/* The cipher here is 3DES, so select the DES algorithm, not AES. */
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA256 |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
};
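
/*
 * Algorithms exposed by the L2 (air interface) variant of the engine.
 * Only KASUMI f8 ciphering is offered here.
 */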
static struct spacc_alg l2_engine_algs[] = {
	{
		.key_offs = 0,
		.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
		.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
				SPA_CTRL_CIPH_MODE_F8,
		.alg = {
			.cra_name = "f8(kasumi)",
			.cra_driver_name = "f8-kasumi-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 8,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_kasumi_f8_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = 16,
				.max_keysize = 16,
				.ivsize = 8,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
};
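
/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * user obtains one of the transforms above through the regular crypto
 * API, and the core selects this implementation whenever its
 * cra_priority beats the software fallback, e.g.:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(des3_ede)", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		crypto_free_ablkcipher(tfm);
 */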

#ifdef CONFIG_OF
static const struct of_device_id spacc_of_id_table[] = {
	{ .compatible = "picochip,spacc-ipsec" },
	{ .compatible = "picochip,spacc-l2" },
	{}
};
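/* Optional addition: allow the module to autoload when matched via DT. */
MODULE_DEVICE_TABLE(of, spacc_of_id_table);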
#endif /* CONFIG_OF */
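
/*
 * A device can be probed either from a platform device ID or from a
 * device tree "compatible" string; treat the two uniformly.
 */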
static bool spacc_is_compatible(struct platform_device *pdev,
				const char *spacc_type)
{
	const struct platform_device_id *platid = platform_get_device_id(pdev);

	if (platid && !strcmp(platid->name, spacc_type))
		return true;

#ifdef CONFIG_OF
	if (of_device_is_compatible(pdev->dev.of_node, spacc_type))
		return true;
#endif /* CONFIG_OF */

	return false;
}
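
/*
 * Probe: map the registers, claim the IRQ and clock, create the DDT DMA
 * pool, program the interrupt threshold and register the engine's
 * algorithms with the crypto core.
 */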
static int spacc_probe(struct platform_device *pdev)
{
	int i, err, ret = -EINVAL;
	struct resource *mem, *irq;
	struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
						   GFP_KERNEL);
	if (!engine)
		return -ENOMEM;
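
	/*
	 * Pick the engine variant: the IPsec engine and the L2 (air
	 * interface) engine differ in context count, cipher/hash page
	 * sizes, FIFO depth and the algorithms they expose.
	 */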
	if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) {
		engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS;
		engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
		engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
		engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
		engine->algs = ipsec_engine_algs;
		engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
	} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
		engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
		engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
		engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
		engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
		engine->algs = l2_engine_algs;
		engine->num_algs = ARRAY_SIZE(l2_engine_algs);
	} else {
		return -EINVAL;
	}

	engine->name = dev_name(&pdev->dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	engine->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(engine->regs))
		return PTR_ERR(engine->regs);

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no IRQ resource for engine\n");
		return -ENXIO;
	}

	if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
			     engine->name, engine)) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		return -EBUSY;
	}

	engine->dev = &pdev->dev;
	engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
	engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
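
	/*
	 * DDTs come from a managed DMA pool: each entry holds up to
	 * MAX_DDT_LEN descriptors, is 8-byte aligned and must not cross a
	 * 64K boundary.
	 */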
	engine->req_pool = dmam_pool_create(engine->name, engine->dev,
					    MAX_DDT_LEN * sizeof(struct spacc_ddt),
					    8, SZ_64K);
	if (!engine->req_pool)
		return -ENOMEM;

	spin_lock_init(&engine->hw_lock);

	engine->clk = clk_get(&pdev->dev, "ref");
	if (IS_ERR(engine->clk)) {
		dev_info(&pdev->dev, "clk unavailable\n");
		return PTR_ERR(engine->clk);
	}

	if (clk_enable(engine->clk)) {
		dev_info(&pdev->dev, "unable to enable clk\n");
		clk_put(engine->clk);
		return -EIO;
	}

	err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
	if (err) {
		clk_disable(engine->clk);
		clk_put(engine->clk);
		return err;
	}

	/*
	 * Use an IRQ threshold of 50% as a default. This seems to be a
	 * reasonable trade off of latency against throughput but can be
	 * changed at runtime.
	 */
	engine->stat_irq_thresh = (engine->fifo_sz / 2);

	/*
	 * Configure the interrupts. We only use the STAT_CNT interrupt as we
	 * only submit a new packet for processing when we complete another in
	 * the queue. This minimizes time spent in the interrupt handler.
	 */
	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
	writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
	       engine->regs + SPA_IRQ_EN_REG_OFFSET);

	setup_timer(&engine->packet_timeout, spacc_packet_timeout,
		    (unsigned long)engine);

	INIT_LIST_HEAD(&engine->pending);
	INIT_LIST_HEAD(&engine->completed);
	INIT_LIST_HEAD(&engine->in_progress);
	engine->in_flight = 0;
	tasklet_init(&engine->complete, spacc_spacc_complete,
		     (unsigned long)engine);

	platform_set_drvdata(pdev, engine);

	INIT_LIST_HEAD(&engine->registered_algs);
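
	/*
	 * Register every algorithm the engine supports. Probing succeeds as
	 * long as at least one registration does; individual failures are
	 * logged and skipped.
	 */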
	for (i = 0; i < engine->num_algs; ++i) {
		engine->algs[i].engine = engine;
		err = crypto_register_alg(&engine->algs[i].alg);
		if (!err) {
			list_add_tail(&engine->algs[i].entry,
				      &engine->registered_algs);
			ret = 0;
		}
		if (err)
			dev_err(engine->dev, "failed to register alg \"%s\"\n",
				engine->algs[i].alg.cra_name);
		else
			dev_dbg(engine->dev, "registered alg \"%s\"\n",
				engine->algs[i].alg.cra_name);
	}

	return ret;
}
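
/*
 * Tear down in reverse order of probe: stop the packet timeout timer,
 * remove the sysfs attribute, unregister every algorithm that made it
 * onto the registered list, then release the clock.
 */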
static int spacc_remove(struct platform_device *pdev)
{
	struct spacc_alg *alg, *next;
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	del_timer_sync(&engine->packet_timeout);
	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);

	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
		list_del(&alg->entry);
		crypto_unregister_alg(&alg->alg);
	}

	clk_disable(engine->clk);
	clk_put(engine->clk);

	return 0;
}

static const struct platform_device_id spacc_id_table[] = {
	{ "picochip,spacc-ipsec", },
	{ "picochip,spacc-l2", },
	{ }
};
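/*
 * Optional addition (sketch): export the platform ID table so the module
 * can be autoloaded from a matching platform device; harmless when the
 * driver is built in.
 */
MODULE_DEVICE_TABLE(platform, spacc_id_table);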

static struct platform_driver spacc_driver = {
	.probe = spacc_probe,
	.remove = spacc_remove,
	.driver = {
		.name = "picochip,spacc",
#ifdef CONFIG_PM
		.pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
		.of_match_table = of_match_ptr(spacc_of_id_table),
	},
	.id_table = spacc_id_table,
};

module_platform_driver(spacc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");