/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i;

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
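	/*
	 * Build the record free chain: the first admin word of each record is
	 * cleared to NULL pointers, and the second word links record i to its
	 * neighbours (i - 1 and i + 1), with EIP197_RC_NULL terminating the
	 * chain at both ends.
	 */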
	for (i = 0; i < EIP197_CS_RC_MAX; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == EIP197_CS_RC_MAX - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
	for (i = 0; i < 64; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}

static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);

	/* Write the firmware */
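	/* Firmware images store 32-bit words big-endian; convert each word to
	 * CPU order before writing it to the classification RAM.
	 */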
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	int i, j, ret = 0;
	u32 val;

	for (i = 0; i < FW_NB; i++) {
		ret = request_firmware(&fw[i], fw_name[i], priv->dev);
		if (ret) {
			dev_err(priv->dev,
				"Failed to request firmware %s (%d)\n",
				fw_name[i], ret);
			goto release_fw;
		}
	}

	/* Clear the scratchpad memory */
	val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
	val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
	       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
	       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
	       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
	writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);

	memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
	       EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

	eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
	eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}

static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;
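	/* Bits 27:25 of HIA_OPTIONS encode the host data width as a power of
	 * two in 32-bit words; the command descriptor size is rounded up to
	 * that width for the fetch configuration below.
	 */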
	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Data Fetch Engine configuration */

	/* Reset all DFE threads */
	writel(EIP197_DxE_THR_CTRL_RESET_PE,
	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	if (priv->version == EIP197) {
		/* Reset HIA input interface arbiter */
		writel(EIP197_HIA_RA_PE_CTRL_RESET,
		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
	}

	/* DMA transfer size to use */
	val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
	val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
	val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
	writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);

	/* Take the DFE threads out of reset */
	writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	/* Configure the processing engine thresholds */
	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
	       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
	       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);

	if (priv->version == EIP197) {
		/* enable HIA input interface arbiter and rings */
		writel(EIP197_HIA_RA_PE_CTRL_EN |
		       GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
	}

	/* Data Store Engine configuration */

	/* Reset all DSE threads */
	writel(EIP197_DxE_THR_CTRL_RESET_PE,
	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Wait for all DSE threads to complete */
	while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
		GENMASK(15, 12)) != GENMASK(15, 12))
		;

	/* DMA transfer size to use */
	val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
	val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
	/* FIXME: instability issues can occur for the EIP97, but disabling it
	 * impacts performance.
	 */
	if (priv->version == EIP197)
		val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
	writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);

	/* Take the DSE threads out of reset */
	writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Configure the processing engine thresholds */
	writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
	       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);

	/* Processing Engine configuration */

	/* H/W capabilities selection */
	val = EIP197_FUNCTION_RSVD;
	val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
	val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
	val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
	val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
	val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
	writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Enable command descriptor rings */
	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	/* Enable result descriptor rings */
	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
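	/* Interrupt coalescing: request a single interrupt once the engine has
	 * processed all currently pending requests, capped at
	 * EIP197_MAX_BATCH_SZ packets.
	 */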
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	struct safexcel_request *request;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * proceed with it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
		if (!request)
			goto request_failed;

		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, request, &commands, &results);
		if (ret) {
			kfree(request);
			goto request_failed;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results) {
			kfree(request);
			continue;
		}

		spin_lock_bh(&priv->ring[ring].egress_lock);
		list_add_tail(&request->list, &priv->ring[ring].list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely(!rdesc->result_data.error_code))
		return 0;

	if (rdesc->result_data.error_code & 0x407f) {
		/* Fatal error (bits 0-7, 14) */
		dev_err(priv->dev,
			"cipher: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code == BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
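	/* Advance the CDR read pointer over every command descriptor belonging
	 * to the completed request, up to and including the one flagged as the
	 * last segment.
	 */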
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring,
			      struct safexcel_request *request)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;
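
	/* Queue a dummy command/result descriptor pair whose only purpose is
	 * to make the engine invalidate the transform record pointed to by
	 * ctxr_dma.
	 */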
	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc)) {
		ret = PTR_ERR(cdesc);
		goto unlock;
	}

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	request->req = async;
	goto unlock;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}

static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct safexcel_request *sreq;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		spin_lock_bh(&priv->ring[ring].egress_lock);
		sreq = list_first_entry(&priv->ring[ring].list,
					struct safexcel_request, list);
		list_del(&sreq->list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		ctx = crypto_tfm_ctx(sreq->req->tfm);
		ndesc = ctx->handle_result(priv, ring, sreq->req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			kfree(sreq);
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			sreq->req->complete(sreq->req, ret);
			local_bh_enable();
		}

		kfree(sreq);
		tot_descs += ndesc;
		handled++;
	}

acknowledge:
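	/* Acknowledge the packets and result descriptor words consumed above so
	 * the ring's processed counters are decremented accordingly.
	 */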
	if (i) {
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	}

	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].egress_lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].egress_lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);
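
	/* Now that results have been handled, kick the dequeue worker so any
	 * requests queued in the meantime are pushed to the engine.
	 */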
	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask;
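
	/* HIA_OPTIONS[27:25] encodes the host data width (2^N 32-bit words):
	 * build an alignment mask from it so the per-descriptor offsets below
	 * are padded to a full data-width unit.
	 */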
	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->version == EIP197) {
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
	} else {
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
	}
}

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
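	/* devm_clk_get() returns -ENOENT when no clock is described in the
	 * device tree; that case is tolerated, while any other error is fatal.
	 */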
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	safexcel_configure(priv);

	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_reg_clk;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_reg_clk;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		INIT_LIST_HEAD(&priv->ring[i].list);
		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].egress_lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_reg_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_reg_clk;
	}

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197,
	},
	{},
};

static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name		= "crypto-safexcel",
		.of_match_table	= safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");