safexcel.c

/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

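/*
 * Initialize the transform record cache (TRC): with the cache held in
 * reset, link all EIP197_CS_RC_MAX record slots into a free list, fill the
 * hash table words that follow the records in classification RAM, then
 * program the free-chain head/tail pointers and the cache geometry (record
 * sizes and hash table size). Only called for the EIP197 variant, see
 * safexcel_hw_init().
 */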
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i;

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
	for (i = 0; i < EIP197_CS_RC_MAX; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == EIP197_CS_RC_MAX - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
	for (i = 0; i < 64; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}

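/*
 * Load one firmware image into a processing engine microcode RAM: hold the
 * engine in reset (also clearing its ECC status), enable program RAM access
 * through EIP197_PE_ICE_RAM_CTRL, copy the image word by word (the blobs
 * are stored big-endian, hence the be32_to_cpu()), then drop RAM access and
 * take the engine back out of reset.
 */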
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

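/*
 * Request the two classification engine microcode images ("ifpp.bin" and
 * "ipue.bin") from userspace, clear the ICE scratchpad RAM, and load each
 * image into its engine with eip197_write_firmware(). On a request failure
 * the images obtained so far are released and the error is returned.
 */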
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	int i, j, ret = 0;
	u32 val;

	for (i = 0; i < FW_NB; i++) {
		ret = request_firmware(&fw[i], fw_name[i], priv->dev);
		if (ret) {
			dev_err(priv->dev,
				"Failed to request firmware %s (%d)\n",
				fw_name[i], ret);
			goto release_fw;
		}
	}

	/* Clear the scratchpad memory */
	val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
	val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
	       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
	       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
	       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
	writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);

	memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
		  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

	eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

	eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}

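/*
 * Program the command descriptor rings (CDR): the host data width is read
 * from bits 27:25 of EIP197_HIA_OPTIONS and used to round up the descriptor
 * size, then every ring gets its DMA base address, descriptor size/offset,
 * descriptor fetch configuration, DMA cache attributes, and a write that
 * clears any pending ring interrupt status.
 */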
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

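/*
 * Program the result descriptor rings (RDR). The setup mirrors the command
 * ring setup above, with two additions: the DMA configuration also sets the
 * result/control buffer write flags, and the per-ring interrupt is enabled
 * in the ring's AIC so completions can be signalled.
 */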
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

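/*
 * One-time hardware bring-up: configure byte swapping from the detected
 * endianness, set the master cache attributes, reset and configure the data
 * fetch (DFE) and data store (DSE) engines, select the EIP96 functions used
 * by the driver (AES-ECB/CBC, SHA-1/SHA-2 and their HMACs), reset all
 * command and result rings, and finally enable the ring threads. On EIP197
 * this also initializes the record cache and loads the classification
 * firmware.
 */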
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Data Fetch Engine configuration */

	/* Reset all DFE threads */
	writel(EIP197_DxE_THR_CTRL_RESET_PE,
	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	if (priv->version == EIP197) {
		/* Reset HIA input interface arbiter */
		writel(EIP197_HIA_RA_PE_CTRL_RESET,
		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
	}

	/* DMA transfer size to use */
	val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
	val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
	val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
	writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);

	/* Take the DFE threads out of reset */
	writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	/* Configure the processing engine thresholds */
	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
	       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
	       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);

	if (priv->version == EIP197) {
		/* enable HIA input interface arbiter and rings */
		writel(EIP197_HIA_RA_PE_CTRL_EN |
		       GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
	}

	/* Data Store Engine configuration */

	/* Reset all DSE threads */
	writel(EIP197_DxE_THR_CTRL_RESET_PE,
	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Wait for all DSE threads to complete */
	while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
		GENMASK(15, 12)) != GENMASK(15, 12))
		;

	/* DMA transfer size to use */
	val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
	val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
	/* FIXME: instability issues can occur for EIP97, but disabling it
	 * impacts performance.
	 */
	if (priv->version == EIP197)
		val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
	writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);

	/* Take the DSE threads out of reset */
	writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Configure the processing engine thresholds */
	writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
	       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);

	/* Processing Engine configuration */

	/* H/W capabilities selection */
	val = EIP197_FUNCTION_RSVD;
	val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
	val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
	val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
	val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
	val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
	writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Enable command descriptor rings */
	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	/* Enable result descriptor rings */
	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

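/*
 * Program the RDR threshold register so that an interrupt is raised once
 * the engine has processed "coal" packets (at most EIP197_MAX_BATCH_SZ),
 * batching completion work for the requests currently on this ring.
 */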
/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

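/*
 * Drain the ring's crypto request queue: first retry a request (and its
 * backlog entry) left over from a previous call that ran out of resources,
 * then keep dequeuing and handing requests to the context's ->send() hook
 * until the queue is empty or descriptor space runs out. Finally account
 * for the new requests, arm the completion threshold if the ring was idle,
 * and tell the CDR/RDR how many descriptors were prepared.
 */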
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	struct safexcel_request *request;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * proceed with it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
		if (!request)
			goto request_failed;

		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, request, &commands, &results);
		if (ret) {
			kfree(request);
			goto request_failed;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results) {
			kfree(request);
			continue;
		}

		spin_lock_bh(&priv->ring[ring].egress_lock);
		list_add_tail(&request->list, &priv->ring[ring].list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

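/*
 * Translate the error_code field of a result descriptor into an errno:
 * error codes matching the fatal mask 0x407f yield -EIO, bit 9
 * (authentication failed) yields -EBADMSG, and any other non-fatal error
 * yields -EINVAL.
 */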
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely(!rdesc->result_data.error_code))
		return 0;

	if (rdesc->result_data.error_code & 0x407f) {
		/* Fatal error (bits 0-7, 14) */
		dev_err(priv->dev,
			"cipher: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code == BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

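/*
 * Queue a context invalidation on the given ring: a single extended command
 * descriptor carrying CONTEXT_CONTROL_INV_TR (plus a matching result
 * descriptor) asks the engine to drop its cached copy of the context record
 * at ctxr_dma. On failure the command descriptor is rolled back so the ring
 * state stays consistent.
 */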
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring,
			      struct safexcel_request *request)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc)) {
		ret = PTR_ERR(cdesc);
		goto unlock;
	}

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	request->req = async;
	goto unlock;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}

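/*
 * Bottom half of the result path: read how many packets the RDR reports as
 * processed, pop that many requests off the ring's in-flight list, let each
 * context's ->handle_result() consume its descriptors, complete the crypto
 * requests, then acknowledge the packets and descriptors back to the
 * hardware. If the hardware counter was saturated the loop runs again, and
 * the ring's request accounting and interrupt threshold are updated at the
 * end.
 */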
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct safexcel_request *sreq;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		spin_lock_bh(&priv->ring[ring].egress_lock);
		sreq = list_first_entry(&priv->ring[ring].list,
					struct safexcel_request, list);
		list_del(&sreq->list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		ctx = crypto_tfm_ctx(sreq->req->tfm);
		ndesc = ctx->handle_result(priv, ring, sreq->req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			kfree(sreq);
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			sreq->req->complete(sreq->req, ret);
			local_bh_enable();
		}

		kfree(sreq);
		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i) {
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	}

	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].egress_lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].egress_lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

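/*
 * Hard IRQ handler for one ring: check the ring's interrupt status, report
 * fatal RDR errors, and acknowledge what was seen. When the processed
 * packet threshold fired, return IRQ_WAKE_THREAD so the threaded handler
 * below does the actual result processing outside hard interrupt context.
 */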
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

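/*
 * Derive the ring configuration from EIP197_HIA_OPTIONS: the number of
 * rings (capped by the max_rings module parameter) and the command/result
 * descriptor sizes in 32-bit words, with the per-descriptor offset rounded
 * up to the host data width. As a worked example (the sizes here are
 * illustrative, not the real structure sizes): with a width field of 2 the
 * mask is 0b11, so a 10-word descriptor would get a 12-word offset.
 */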
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->version == EIP197) {
		offsets->hia_aic = EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
		offsets->pe = EIP197_PE_BASE;
	} else {
		offsets->hia_aic = EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
		offsets->pe = EIP97_PE_BASE;
	}
}

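/*
 * Platform probe: map the register resource, grab the (optional) core and
 * register clocks, set a 64-bit DMA mask, create the context record DMA
 * pool, then set up per-ring descriptor rings, IRQs, workqueues and locks
 * before running the hardware init and registering the crypto algorithms.
 */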
static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	safexcel_configure(priv);

	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_reg_clk;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_reg_clk;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		INIT_LIST_HEAD(&priv->ring[i].list);
		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].egress_lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_reg_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_reg_clk;
	}

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197,
	},
	{},
};

static struct platform_driver crypto_safexcel = {
	.probe = safexcel_probe,
	.remove = safexcel_remove,
	.driver = {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");