safexcel.c

/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

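/*
 * Initialize the transform record cache (TRC): put the cache in software
 * reset, clear every record and hash table entry, link the records into a
 * free chain and program the cache geometry. Only used on EIP197, which
 * embeds the record cache.
 */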
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i;

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
	for (i = 0; i < EIP197_CS_RC_MAX; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == EIP197_CS_RC_MAX - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
	for (i = 0; i < 64; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}

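/*
 * Push one firmware image into the classification engine's program RAM:
 * hold the micro-engine in reset, enable program memory access through
 * 'prog_en', copy the big-endian words of the image, then release the
 * reset so the engine starts running the new code.
 */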
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

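/*
 * Request and load the two EIP197 classification micro-engine firmwares,
 * ifpp.bin and ipue.bin, after clearing the engine's scratchpad memory.
 */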
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	int i, j, ret = 0;
	u32 val;

	for (i = 0; i < FW_NB; i++) {
		ret = request_firmware(&fw[i], fw_name[i], priv->dev);
		if (ret) {
			dev_err(priv->dev,
				"Failed to request firmware %s (%d)\n",
				fw_name[i], ret);
			goto release_fw;
		}
	}

	/* Clear the scratchpad memory */
	val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
	val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
	       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
	       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
	       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
	writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);

	memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
	       EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

	eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

	eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}

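/*
 * Program the command descriptor ring (CDR) registers for every ring:
 * base address, descriptor size/offset, fetch configuration and DMA
 * cache attributes, then clear any interrupt left pending. The helper
 * below does the equivalent setup for the result descriptor rings.
 */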
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

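/*
 * Bring the engine to an operational state: configure byte swapping and
 * master cache attributes, reset and configure the data fetch (DFE) and
 * data store (DSE) engines, select the processing engine capabilities,
 * prepare the command and result descriptor rings, and, on EIP197,
 * initialize the record cache and load the classification firmware.
 */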
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Data Fetch Engine configuration */

	/* Reset all DFE threads */
	writel(EIP197_DxE_THR_CTRL_RESET_PE,
	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	if (priv->version == EIP197) {
		/* Reset HIA input interface arbiter */
		writel(EIP197_HIA_RA_PE_CTRL_RESET,
		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
	}

	/* DMA transfer size to use */
	val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
	val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
	val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
	writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);

	/* Take the DFE threads out of reset */
	writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	/* Configure the processing engine thresholds */
	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
	       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
	       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);

	if (priv->version == EIP197) {
		/* enable HIA input interface arbiter and rings */
		writel(EIP197_HIA_RA_PE_CTRL_EN |
		       GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
	}

	/* Data Store Engine configuration */

	/* Reset all DSE threads */
	writel(EIP197_DxE_THR_CTRL_RESET_PE,
	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Wait for all DSE threads to complete */
	while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
		GENMASK(15, 12)) != GENMASK(15, 12))
		;

	/* DMA transfer size to use */
	val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
	val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
	/* FIXME: instability issues can occur for EIP97 but disabling it
	 * impacts performance.
	 */
	if (priv->version == EIP197)
		val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
	writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);

	/* Take the DSE threads out of reset */
	writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Configure the processing engine thresholds */
	writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
	       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);

	/* Processing Engine configuration */

	/* H/W capabilities selection */
	val = EIP197_FUNCTION_RSVD;
	val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
	val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
	val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
	val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
	writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Enable command descriptor rings */
	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);

	/* Enable result descriptor rings */
	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

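/*
 * Pull requests out of the ring's crypto queue and turn them into command
 * and result descriptors, then tell the CDR/RDR how many descriptors were
 * prepared. A request that could not be handled for lack of resources is
 * stashed in priv->ring[ring].req and retried on the next call.
 */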
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	struct safexcel_request *request;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
		if (!request)
			goto request_failed;

		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, request, &commands, &results);
		if (ret) {
			kfree(request);
			goto request_failed;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results) {
			kfree(request);
			continue;
		}

		spin_lock_bh(&priv->ring[ring].egress_lock);
		list_add_tail(&request->list, &priv->ring[ring].list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring,
			      struct safexcel_request *request)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc)) {
		ret = PTR_ERR(cdesc);
		goto unlock;
	}

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	request->req = async;
	goto unlock;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}

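/*
 * Walk the result descriptors the engine has processed (called from the
 * threaded ring IRQ handler): let the owning context handle each result,
 * complete the requests and acknowledge the processed packets back to
 * the RDR.
 */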
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct safexcel_request *sreq;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		spin_lock_bh(&priv->ring[ring].egress_lock);
		sreq = list_first_entry(&priv->ring[ring].list,
					struct safexcel_request, list);
		list_del(&sreq->list);
		spin_unlock_bh(&priv->ring[ring].egress_lock);

		ctx = crypto_tfm_ctx(sreq->req->tfm);
		ndesc = ctx->handle_result(priv, ring, sreq->req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			kfree(sreq);
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			sreq->req->complete(sreq->req, ret);
			local_bh_enable();
		}

		kfree(sreq);
		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i) {
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	}

	/* If the number of requests overflowed the counter, try to process
	 * more requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].egress_lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].egress_lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

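/*
 * Derive the ring count (capped by the max_rings module parameter) and the
 * command/result descriptor sizes from the HIA options register, rounding
 * the descriptor offsets up using the alignment mask read from that same
 * register.
 */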
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->version == EIP197) {
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
	} else {
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
	}
}

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	safexcel_configure(priv);

	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_reg_clk;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_reg_clk;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		INIT_LIST_HEAD(&priv->ring[i].list);
		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].egress_lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_reg_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_reg_clk;
	}

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197,
	},
	{},
};

static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");