/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
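
/*
 * Initialize the transform record cache (TRC): reset the cache, build the
 * free record chain, clear the hash table and program the cache parameters
 * matching the engine version.
 */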
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

	if (priv->version == EIP197B) {
		cs_rc_max = EIP197B_CS_RC_MAX;
		cs_ht_wc = EIP197B_CS_HT_WC;
		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
	} else {
		cs_rc_max = EIP197D_CS_RC_MAX;
		cs_ht_wc = EIP197D_CS_HT_WC;
		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
	}

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
	for (i = 0; i < cs_rc_max; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}
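
/*
 * Load one firmware image into a processing engine's instruction RAM: hold
 * the engine in reset, enable program memory access, copy the image word by
 * word, then release the engine from reset.
 */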
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, int pe, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}
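
/*
 * Request the IFPP and IPUE firmware images and load them into every
 * processing engine. EIP97 engines do not use firmware.
 */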
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[31], *dir = NULL;
	int i, j, ret = 0, pe;
	u32 val;

	switch (priv->version) {
	case EIP197B:
		dir = "eip197b";
		break;
	case EIP197D:
		dir = "eip197d";
		break;
	default:
		/* No firmware is required */
		return 0;
	}

	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = request_firmware(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (priv->version != EIP197B)
				goto release_fw;

			/* Fall back to the old firmware location for the
			 * EIP197b.
			 */
			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
			if (ret) {
				dev_err(priv->dev,
					"Failed to request firmware %s (%d)\n",
					fw_name[i], ret);
				goto release_fw;
			}
		}
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Clear the scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

		eip197_write_firmware(priv, fw[FW_IFPP], pe,
				      EIP197_PE_ICE_FPP_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

		eip197_write_firmware(priv, fw[FW_IPUE], pe,
				      EIP197_PE_ICE_PUE_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
	}

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}
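
/*
 * Program the command descriptor rings (CDR): base addresses, descriptor
 * size and offset, fetch configuration and DMA cache attributes.
 */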
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}
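
/*
 * Program the result descriptor rings (RDR) and enable their per-ring
 * interrupts.
 */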
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}
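
/*
 * Bring the engine to an operational state: configure the host interface,
 * the data fetch/store engines and the processing engines, prepare the
 * descriptor rings and, on EIP197, initialize the record cache and load
 * the firmware.
 */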
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret, pe;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	/* For EIP197 set maximum number of TX commands to 2^5 = 32 */
	if (priv->version == EIP197B || priv->version == EIP197D)
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* Reset HIA input interface arbiter */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Leave the DFE threads reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->version == EIP197B || priv->version == EIP197D)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Leave the DSE threads reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* H/W capabilities selection */
		val = EIP197_FUNCTION_RSVD;
		val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
		val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
		val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
		val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
		val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
		val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
		val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197B || priv->version == EIP197D) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}
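
/*
 * Dequeue requests from the ring's crypto queue and push the resulting
 * command/result descriptors to the engine, batching as many requests as
 * the available resources allow.
 */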
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	struct safexcel_request *request;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
		if (!request)
			goto request_failed;

		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, request, &commands, &results);
		if (ret) {
			kfree(request);
			goto request_failed;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results) {
			kfree(request);
			continue;
		}

		spin_lock_bh(&priv->ring[ring].egress_lock);
		list_add_tail(&request->list, &priv->ring[ring].list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
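
/* Translate the error code reported in a result descriptor into an errno */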
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely(!rdesc->result_data.error_code))
		return 0;

	if (rdesc->result_data.error_code & 0x407f) {
		/* Fatal error (bits 0-7, 14) */
		dev_err(priv->dev,
			"cipher: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code == BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}
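
/* Acknowledge all command descriptors belonging to the completed request */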
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
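
/*
 * Queue a special command asking the engine to invalidate the context
 * record pointed to by ctxr_dma.
 */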
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring,
			      struct safexcel_request *request)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc)) {
		ret = PTR_ERR(cdesc);
		goto unlock;
	}

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	request->req = async;
	goto unlock;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}
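
/*
 * Handle the results reported by a ring: pop each completed request, let its
 * handler inspect the result descriptors, complete the request and
 * acknowledge the processed descriptors.
 */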
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct safexcel_request *sreq;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		spin_lock_bh(&priv->ring[ring].egress_lock);
		sreq = list_first_entry(&priv->ring[ring].list,
					struct safexcel_request, list);
		list_del(&sreq->list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		ctx = crypto_tfm_ctx(sreq->req->tfm);
		ndesc = ctx->handle_result(priv, ring, sreq->req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			kfree(sreq);
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			sreq->req->complete(sreq->req, ret);
			local_bh_enable();
		}

		kfree(sreq);
		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i) {
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	}

	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].egress_lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].egress_lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};
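
/*
 * Ring interrupt handler: check the RDR status and wake the threaded handler
 * when processed results are pending.
 */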
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (!(safexcel_algs[j]->engines & priv->version))
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}
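
/*
 * Read the engine capabilities (number of PEs, data width, number of rings)
 * and derive the command/result descriptor sizes and offsets.
 */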
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		mask = EIP197_N_PES_MASK;
		break;
	default:
		mask = EIP97_N_PES_MASK;
	}
	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}
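
/*
 * Select the register map matching the engine version; EIP97 and EIP197 use
 * different base offsets.
 */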
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
		break;
	case EIP97IES:
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
		break;
	}
}

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	if (priv->version == EIP197B || priv->version == EIP197D)
		priv->flags |= EIP197_TRC_CACHE;

	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	safexcel_configure(priv);

	priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_reg_clk;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_reg_clk;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		INIT_LIST_HEAD(&priv->ring[i].list);
		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].egress_lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_reg_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_reg_clk;
	}

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B,
	},
	{},
};

static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");