crypto4xx_core.c

/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file implements AMCC crypto offload Linux device driver for use with
 * Linux CryptoAPI.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"

#define PPC4XX_SEC_VERSION_STR			"0.5"
/**
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_contol ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;
	u32 device_ctrl;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* setup pe dma, include reset sg, pdr and pe, then release reset */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* un reset pe, sg and pdr */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
	ring_size.w = 0;
	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	device_ctrl |= PPC4XX_DC_3DES_EN;
	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
	part_ring_size.w = 0;
	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
	io_threshold.w = 0;
	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* enable the pe; keep sg, pdr and pe out of reset */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
}
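
/**
 * Allocate the inbound and outbound dynamic SA buffers for a context.
 * size is given in 32-bit words; both buffers are zeroed on success.
 */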
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
					&ctx->sa_in_dma_addr, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
					 &ctx->sa_out_dma_addr, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		dma_free_coherent(ctx->dev->core_dev->device,
				  size * 4,
				  ctx->sa_in, ctx->sa_in_dma_addr);
		return -ENOMEM;
	}

	memset(ctx->sa_in, 0, size * 4);
	memset(ctx->sa_out, 0, size * 4);
	ctx->sa_len = size;

	return 0;
}
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	if (ctx->sa_in != NULL)
		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
				  ctx->sa_in, ctx->sa_in_dma_addr);
	if (ctx->sa_out != NULL)
		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
				  ctx->sa_out, ctx->sa_out_dma_addr);

	ctx->sa_in_dma_addr = 0;
	ctx->sa_out_dma_addr = 0;
	ctx->sa_len = 0;
}

u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
{
	ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
				sizeof(struct sa_state_record),
				&ctx->state_record_dma_addr, GFP_ATOMIC);
	if (!ctx->state_record_dma_addr)
		return -ENOMEM;

	memset(ctx->state_record, 0, sizeof(struct sa_state_record));

	return 0;
}

void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
	if (ctx->state_record != NULL)
		dma_free_coherent(ctx->dev->core_dev->device,
				  sizeof(struct sa_state_record),
				  ctx->state_record,
				  ctx->state_record_dma_addr);
	ctx->state_record_dma_addr = 0;
}
/**
 * alloc memory for the packet descriptor ring
 * also sets up the per-descriptor bookkeeping (pd_uinfo) and the shadow
 * SA and state record pools used by the ring entries
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;
	struct pd_uinfo *pd_uinfo;

	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_ATOMIC);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr,
				  dev->pdr_pa);
		return -ENOMEM;
	}
	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);

	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
						 256 * PPC4XX_NUM_PD,
						 &dev->shadow_sa_pool_pa,
						 GFP_ATOMIC);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
			 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;

	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
						sizeof(struct pd_uinfo) * i);

		/* alloc 256 bytes which is enough for any kind of dynamic sa */
		pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
		pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;

		/* alloc state record */
		pd_uinfo->sr_va = dev->shadow_sr_pool +
		    sizeof(struct sa_state_record) * i;
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
		    sizeof(struct sa_state_record) * i;
	}

	return 0;
}
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr != NULL)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);
	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}
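
/*
 * Claim the next free packet descriptor slot from the PD ring.
 * Caller must hold dev->core_dev->lock (hence the _nolock suffix);
 * returns ERING_WAS_FULL when the ring has no free entry.
 */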
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo;
	unsigned long flags;

	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
				       sizeof(struct pd_uinfo) * idx);
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	pd_uinfo->state = PD_ENTRY_FREE;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
				       dma_addr_t *pd_dma, u32 idx)
{
	*pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;

	return dev->pdr + sizeof(struct ce_pd) * idx;
}
/**
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail, gdr_head and gdr_count are initialized by this function
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				      &dev->gdr_pa, GFP_ATOMIC);
	if (!dev->gdr)
		return -ENOMEM;

	memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);

	return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	dma_free_coherent(dev->core_dev->device,
			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
			  dev->gdr, dev->gdr_pa);
}
/*
 * When this function is called, preemption or interrupts must be disabled.
 */
u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}
static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
}
/**
 * alloc memory for the scatter ring
 * need to alloc buf for the ring
 * sdr_tail, sdr_head and sdr_count are initialized by this function
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;
	struct ce_sd *sd_array;

	/* alloc memory for scatter descriptor ring */
	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_ATOMIC);
	if (!dev->sdr)
		return -ENOMEM;

	dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
			dev->scatter_buffer_size * PPC4XX_NUM_SD,
			&dev->scatter_buffer_pa, GFP_ATOMIC);
	if (!dev->scatter_buffer_va) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);
		return -ENOMEM;
	}

	sd_array = dev->sdr;

	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		sd_array[i].ptr = dev->scatter_buffer_pa +
				  dev->scatter_buffer_size * i;
	}

	return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr != NULL)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va != NULL)
		dma_free_coherent(dev->core_dev->device,
				  dev->scatter_buffer_size * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}
/*
 * When this function is called, preemption or interrupts must be disabled.
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} /* the head == tail (empty) case has already been taken care of */
	dev->sdr_head = tmp;

	return retval;
}
static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}
	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
}
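
/*
 * Copy one chunk of processed data from the scatter ring buffers back into
 * the destination page at *addr. Returns 1 when a full scatter buffer was
 * consumed and more data remains, so the caller should loop.
 */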
static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
				   dma_addr_t *addr, u32 *length,
				   u32 *idx, u32 *offset, u32 *nbytes)
{
	u32 len;

	if (*length > dev->scatter_buffer_size) {
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset,
			dev->scatter_buffer_size);
		*offset = 0;
		*length -= dev->scatter_buffer_size;
		*nbytes -= dev->scatter_buffer_size;
		if (*idx == PPC4XX_LAST_SD)
			*idx = 0;
		else
			(*idx)++;
		*addr = *addr + dev->scatter_buffer_size;
		return 1;
	} else if (*length < dev->scatter_buffer_size) {
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset, *length);
		if ((*offset + *length) == dev->scatter_buffer_size) {
			if (*idx == PPC4XX_LAST_SD)
				*idx = 0;
			else
				(*idx)++;
			*nbytes -= *length;
			*offset = 0;
		} else {
			*nbytes -= *length;
			*offset += *length;
		}

		return 0;
	} else {
		len = (*nbytes <= dev->scatter_buffer_size) ?
				(*nbytes) : dev->scatter_buffer_size;
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset,
			len);
		*offset = 0;
		*nbytes -= len;

		if (*idx == PPC4XX_LAST_SD)
			*idx = 0;
		else
			(*idx)++;

		return 0;
	}
}
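
/*
 * Walk the destination scatterlist and drain the packet data that the
 * engine placed in the scatter ring buffers into the caller's buffers.
 */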
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	dma_addr_t addr;
	u32 this_sd;
	u32 offset;
	u32 len;
	u32 i;
	u32 sg_len;
	struct scatterlist *sg;

	this_sd = pd_uinfo->first_sd;
	offset = 0;
	i = 0;

	while (nbytes) {
		sg = &dst[i];
		sg_len = sg->length;
		addr = dma_map_page(dev->core_dev->device, sg_page(sg),
				sg->offset, sg->length, DMA_TO_DEVICE);

		if (offset == 0) {
			len = (nbytes <= sg->length) ? nbytes : sg->length;
			while (crypto4xx_fill_one_page(dev, &addr, &len,
				&this_sd, &offset, &nbytes))
				;
			if (!nbytes)
				return;
			i++;
		} else {
			len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
				nbytes : (dev->scatter_buffer_size - offset);
			len = (sg->length < len) ? sg->length : len;
			while (crypto4xx_fill_one_page(dev, &addr, &len,
					&this_sd, &offset, &nbytes))
				;
			if (!nbytes)
				return;
			sg_len -= len;
			if (sg_len) {
				addr += len;
				while (crypto4xx_fill_one_page(dev, &addr,
					&sg_len, &this_sd, &offset, &nbytes))
					;
			}
			i++;
		}
	}
}

static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
					struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
	struct sa_state_record *state_record =
				(struct sa_state_record *) pd_uinfo->sr_va;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}

	return 0;
}
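
/*
 * Return all gather and scatter descriptors used by a completed packet
 * descriptor back to their rings.
 */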
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;

	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}

static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
				     struct pd_uinfo *pd_uinfo,
				     struct ce_pd *pd)
{
	struct crypto4xx_ctx *ctx;
	struct ablkcipher_request *ablk_req;
	struct scatterlist *dst;
	dma_addr_t addr;

	ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ablk_req->base.tfm);

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
					  ablk_req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
				    dst->offset, dst->length, DMA_FROM_DEVICE);
	}
	crypto4xx_ret_sg_desc(dev, pd_uinfo);
	if (ablk_req->base.complete != NULL)
		ablk_req->base.complete(&ablk_req->base, 0);

	return 0;
}
static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ahash_req->base.tfm);

	crypto4xx_copy_digest_to_dst(pd_uinfo,
				     crypto_tfm_ctx(ahash_req->base.tfm));
	crypto4xx_ret_sg_desc(dev, pd_uinfo);
	/* call user provided callback function */
	if (ahash_req->base.complete != NULL)
		ahash_req->base.complete(&ahash_req->base, 0);

	return 0;
}

static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd;
	struct pd_uinfo *pd_uinfo;

	pd = dev->pdr + sizeof(struct ce_pd) * idx;
	pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo) * idx;
	if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
			CRYPTO_ALG_TYPE_ABLKCIPHER)
		return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
	else
		return crypto4xx_ahash_done(dev, pd_uinfo);
}
/**
 * Note: Only use this function to copy items that are word aligned.
 */
void crypto4xx_memcpy_le(unsigned int *dst,
			 const unsigned char *buf,
			 int len)
{
	u8 *tmp;

	for (; len >= 4; buf += 4, len -= 4)
		*dst++ = cpu_to_le32(*(unsigned int *) buf);

	tmp = (u8 *)dst;
	switch (len) {
	case 3:
		*tmp++ = 0;
		*tmp++ = *(buf+2);
		*tmp++ = *(buf+1);
		*tmp++ = *buf;
		break;
	case 2:
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = *(buf+1);
		*tmp++ = *buf;
		break;
	case 1:
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = *buf;
		break;
	default:
		break;
	}
}
static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}

void crypto4xx_return_pd(struct crypto4xx_device *dev,
			 u32 pd_entry, struct ce_pd *pd,
			 struct pd_uinfo *pd_uinfo)
{
	/* irq should be already disabled */
	dev->pdr_head = pd_entry;
	pd->pd_ctl.w = 0;
	pd->pd_ctl_len.w = 0;
	pd_uinfo->state = PD_ENTRY_FREE;
}
/*
 * derive number of elements in scatterlist
 * Shamelessly copied from talitos.c
 */
static int get_sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes) {
		sg_nents++;
		if (sg->length > nbytes)
			break;
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return sg_nents;
}

static u32 get_next_gd(u32 current)
{
	if (current != PPC4XX_LAST_GD)
		return current + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 current)
{
	if (current != PPC4XX_LAST_SD)
		return current + 1;
	else
		return 0;
}
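
/**
 * crypto4xx_build_pd - build one packet descriptor for a request
 *
 * Reserves gather/scatter descriptors as needed, fills in the SA and
 * state record pointers, maps the source and destination scatterlists
 * and finally kicks the packet engine. Returns -EINPROGRESS on success,
 * or -EAGAIN when a ring is full and the caller should retry.
 */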
u32 crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       unsigned int datalen,
		       void *iv, u32 iv_len)
{
	struct crypto4xx_device *dev = ctx->dev;
	dma_addr_t addr, pd_dma, sd_dma, gd_dma;
	struct dynamic_sa_ctl *sa;
	struct scatterlist *sg;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo = NULL;
	unsigned int nbytes = datalen, idx;
	unsigned int ivlen = 0;
	u32 gd_idx = 0;

	/* figure out how many gd are needed */
	num_gd = get_sg_count(src, datalen);
	if (num_gd == 1)
		num_gd = 0;

	/* figure out how many sd are needed */
	if (sg_is_last(dst) || ctx->is_hash) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}

	/*
	 * The following section of code needs to be protected.
	 * The gather ring and scatter ring need to be consecutive.
	 * If we run out of any kind of descriptor, the descriptors
	 * already obtained must be returned to their original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
				       sizeof(struct pd_uinfo) * pd_entry);
	pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
	pd_uinfo->async_req = req;
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;

	if (iv_len || ctx->is_hash) {
		ivlen = iv_len;
		pd->sa = pd_uinfo->sa_pa;
		sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
		if (ctx->direction == DIR_INBOUND)
			memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
		else
			memcpy(sa, ctx->sa_out, ctx->sa_len * 4);

		memcpy((void *) sa + ctx->offset_to_sr_ptr,
			&pd_uinfo->sr_pa, 4);

		if (iv_len)
			crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
	} else {
		if (ctx->direction == DIR_INBOUND) {
			pd->sa = ctx->sa_in_dma_addr;
			sa = (struct dynamic_sa_ctl *) ctx->sa_in;
		} else {
			pd->sa = ctx->sa_out_dma_addr;
			sa = (struct dynamic_sa_ctl *) ctx->sa_out;
		}
	}
	pd->sa_len = ctx->sa_len;
	if (num_gd) {
		/* get first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		pd_uinfo->num_gd = num_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		idx = 0;
		src = &src[0];
		/* walk the sg, and setup gather array */
		while (nbytes) {
			sg = &src[idx];
			addr = dma_map_page(dev->core_dev->device, sg_page(sg),
				    sg->offset, sg->length, DMA_TO_DEVICE);
			gd->ptr = addr;
			gd->ctl_len.len = sg->length;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (sg->length >= nbytes)
				break;
			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			idx++;
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
				src->offset, src->length, DMA_TO_DEVICE);
		/*
		 * Disable gather in sa command
		 */
		sa->sa_command_0.bf.gather = 0;
		/*
		 * Indicate gather array is not used
		 */
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (ctx->is_hash || sg_is_last(dst)) {
		/*
		 * We know the application gives us dst as one whole piece of
		 * memory, so there is no need to use the scatter ring.
		 * In the is_hash case, the icv is always at the end of the
		 * src data.
		 */
		pd_uinfo->using_sd = 0;
		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
		pd_uinfo->dest_va = dst;
		sa->sa_command_0.bf.scatter = 0;
		if (ctx->is_hash)
			pd->dest = virt_to_phys((void *)dst);
		else
			pd->dest = (u32)dma_map_page(dev->core_dev->device,
					sg_page(dst), dst->offset,
					dst->length, DMA_TO_DEVICE);
	} else {
		struct ce_sd *sd = NULL;
		u32 sd_idx = fst_sd;

		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->using_sd = 1;
		pd_uinfo->dest_va = dst;
		pd_uinfo->first_sd = fst_sd;
		pd_uinfo->num_sd = num_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* setup scatter descriptor */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;
		/* sd->ptr should be setup by sd_init routine */
		idx = 0;
		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* setup scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			else
				/*
				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
				 * which is more than nbytes, so done.
				 */
				nbytes = 0;
		}
	}

	sa->sa_command_1.bf.hash_crypto_offset = 0;
	pd->pd_ctl.w = ctx->pd_ctl;
	pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
	pd_uinfo->state = PD_ENTRY_INUSE;
	wmb();
	/* write any value to push engine to read a pd */
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	return -EINPROGRESS;
}
/**
 * Algorithm Registration Functions
 */
static int crypto4xx_alg_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = amcc_alg->dev;
	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_in_dma_addr = 0;
	ctx->sa_out_dma_addr = 0;
	ctx->sa_len = 0;

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	default:
		tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
					 sizeof(struct crypto4xx_ctx));
		break;
	}

	return 0;
}

static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto4xx_free_sa(ctx);
	crypto4xx_free_state_record(ctx);
}
int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
			   struct crypto4xx_alg_common *crypto_alg,
			   int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		alg->dev = sec_dev;

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			rc = crypto_register_ahash(&alg->alg.u.hash);
			break;

		default:
			rc = crypto_register_alg(&alg->alg.u.cipher);
			break;
		}

		if (rc)
			kfree(alg);
		else
			list_add_tail(&alg->entry, &sec_dev->alg_list);
	}

	return 0;
}
static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&alg->alg.u.hash);
			break;

		default:
			crypto_unregister_alg(&alg->alg.u.cipher);
		}
		kfree(alg);
	}
}
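
/*
 * Bottom half: walk the PD ring from the tail and complete every
 * descriptor the packet engine has marked done.
 */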
static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail;

	while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
		tail = core_dev->dev->pdr_tail;
		pd_uinfo = core_dev->dev->pdr_uinfo +
			sizeof(struct pd_uinfo) * tail;
		pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
		if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
				   pd->pd_ctl.bf.pe_done &&
				   !pd->pd_ctl.bf.host_ready) {
			pd->pd_ctl.bf.pe_done = 0;
			crypto4xx_pd_done(core_dev->dev, tail);
			crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
			pd_uinfo->state = PD_ENTRY_FREE;
		} else {
			/* if tail not done, break */
			break;
		}
	}
}

/**
 * Top Half of isr.
 */
static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	if (!core_dev->dev->ce_base)
		return 0;

	writel(PPC4XX_INTERRUPT_CLR,
	       core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}
/**
 * Supported Crypto Algorithms
 */
struct crypto4xx_alg_common crypto4xx_alg[] = {
	/* Crypto AES modes */
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
		.cra_name	= "cbc(aes)",
		.cra_driver_name = "cbc-aes-ppc4xx",
		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
		.cra_type	= &crypto_ablkcipher_type,
		.cra_init	= crypto4xx_alg_init,
		.cra_exit	= crypto4xx_alg_exit,
		.cra_module	= THIS_MODULE,
		.cra_u		= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_IV_SIZE,
				.setkey		= crypto4xx_setkey_aes_cbc,
				.encrypt	= crypto4xx_encrypt,
				.decrypt	= crypto4xx_decrypt,
			}
		}
	}},
};
/**
 * Module Initialization Routine
 */
static int crypto4xx_probe(struct platform_device *ofdev)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;

	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
	if (rc)
		return -ENODEV;

	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc405ex-crypto")) {
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc460sx-crypto")) {
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
	} else {
		printk(KERN_ERR "Crypto Function Not supported!\n");
		return -EINVAL;
	}

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	if (!core_dev->dev) {
		rc = -ENOMEM;
		goto err_alloc_dev;
	}

	core_dev->dev->core_dev = core_dev;
	core_dev->device = dev;
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_gdr;

	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
			 core_dev->dev->name, dev);
	if (rc)
		goto err_request_irq;

	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* need to setup pdr, rdr, gdr and sdr before this */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
				    ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	return 0;

err_start_dev:
	iounmap(core_dev->dev->ce_base);
err_iomap:
	free_irq(core_dev->irq, dev);
err_request_irq:
	irq_dispose_mapping(core_dev->irq);
	tasklet_kill(&core_dev->tasklet);
	crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
	crypto4xx_destroy_gdr(core_dev->dev);
err_build_gdr:
	crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}
static int crypto4xx_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);
	/* Un-register with Linux CryptoAPI */
	crypto4xx_unregister_alg(core_dev->dev);
	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);

	return 0;
}

static const struct of_device_id crypto4xx_match[] = {
	{ .compatible = "amcc,ppc4xx-crypto",},
	{ },
};

static struct platform_driver crypto4xx_driver = {
	.driver = {
		.name = "crypto4xx",
		.of_match_table = crypto4xx_match,
	},
	.probe		= crypto4xx_probe,
	.remove		= crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");