s5p-sss.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. //
  3. // Cryptographic API.
  4. //
  5. // Support for Samsung S5PV210 and Exynos HW acceleration.
  6. //
  7. // Copyright (C) 2011 NetUP Inc. All rights reserved.
  8. // Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
  9. //
  10. // Hash part based on omap-sham.c driver.
  11. #include <linux/clk.h>
  12. #include <linux/crypto.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/err.h>
  15. #include <linux/errno.h>
  16. #include <linux/init.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/io.h>
  19. #include <linux/kernel.h>
  20. #include <linux/module.h>
  21. #include <linux/of.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/scatterlist.h>
  24. #include <crypto/ctr.h>
  25. #include <crypto/aes.h>
  26. #include <crypto/algapi.h>
  27. #include <crypto/scatterwalk.h>
  28. #include <crypto/hash.h>
  29. #include <crypto/md5.h>
  30. #include <crypto/sha.h>
  31. #include <crypto/internal/hash.h>
  32. #define _SBF(s, v) ((v) << (s))
  33. /* Feed control registers */
  34. #define SSS_REG_FCINTSTAT 0x0000
  35. #define SSS_FCINTSTAT_HPARTINT BIT(7)
  36. #define SSS_FCINTSTAT_HDONEINT BIT(5)
  37. #define SSS_FCINTSTAT_BRDMAINT BIT(3)
  38. #define SSS_FCINTSTAT_BTDMAINT BIT(2)
  39. #define SSS_FCINTSTAT_HRDMAINT BIT(1)
  40. #define SSS_FCINTSTAT_PKDMAINT BIT(0)
  41. #define SSS_REG_FCINTENSET 0x0004
  42. #define SSS_FCINTENSET_HPARTINTENSET BIT(7)
  43. #define SSS_FCINTENSET_HDONEINTENSET BIT(5)
  44. #define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
  45. #define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
  46. #define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
  47. #define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
  48. #define SSS_REG_FCINTENCLR 0x0008
  49. #define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
  50. #define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
  51. #define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
  52. #define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
  53. #define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
  54. #define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
  55. #define SSS_REG_FCINTPEND 0x000C
  56. #define SSS_FCINTPEND_HPARTINTP BIT(7)
  57. #define SSS_FCINTPEND_HDONEINTP BIT(5)
  58. #define SSS_FCINTPEND_BRDMAINTP BIT(3)
  59. #define SSS_FCINTPEND_BTDMAINTP BIT(2)
  60. #define SSS_FCINTPEND_HRDMAINTP BIT(1)
  61. #define SSS_FCINTPEND_PKDMAINTP BIT(0)
  62. #define SSS_REG_FCFIFOSTAT 0x0010
  63. #define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
  64. #define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
  65. #define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
  66. #define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
  67. #define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
  68. #define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
  69. #define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
  70. #define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
  71. #define SSS_REG_FCFIFOCTRL 0x0014
  72. #define SSS_FCFIFOCTRL_DESSEL BIT(2)
  73. #define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
  74. #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
  75. #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
  76. #define SSS_HASHIN_MASK _SBF(0, 0x03)
  77. #define SSS_REG_FCBRDMAS 0x0020
  78. #define SSS_REG_FCBRDMAL 0x0024
  79. #define SSS_REG_FCBRDMAC 0x0028
  80. #define SSS_FCBRDMAC_BYTESWAP BIT(1)
  81. #define SSS_FCBRDMAC_FLUSH BIT(0)
  82. #define SSS_REG_FCBTDMAS 0x0030
  83. #define SSS_REG_FCBTDMAL 0x0034
  84. #define SSS_REG_FCBTDMAC 0x0038
  85. #define SSS_FCBTDMAC_BYTESWAP BIT(1)
  86. #define SSS_FCBTDMAC_FLUSH BIT(0)
  87. #define SSS_REG_FCHRDMAS 0x0040
  88. #define SSS_REG_FCHRDMAL 0x0044
  89. #define SSS_REG_FCHRDMAC 0x0048
  90. #define SSS_FCHRDMAC_BYTESWAP BIT(1)
  91. #define SSS_FCHRDMAC_FLUSH BIT(0)
  92. #define SSS_REG_FCPKDMAS 0x0050
  93. #define SSS_REG_FCPKDMAL 0x0054
  94. #define SSS_REG_FCPKDMAC 0x0058
  95. #define SSS_FCPKDMAC_BYTESWAP BIT(3)
  96. #define SSS_FCPKDMAC_DESCEND BIT(2)
  97. #define SSS_FCPKDMAC_TRANSMIT BIT(1)
  98. #define SSS_FCPKDMAC_FLUSH BIT(0)
  99. #define SSS_REG_FCPKDMAO 0x005C
  100. /* AES registers */
  101. #define SSS_REG_AES_CONTROL 0x00
  102. #define SSS_AES_BYTESWAP_DI BIT(11)
  103. #define SSS_AES_BYTESWAP_DO BIT(10)
  104. #define SSS_AES_BYTESWAP_IV BIT(9)
  105. #define SSS_AES_BYTESWAP_CNT BIT(8)
  106. #define SSS_AES_BYTESWAP_KEY BIT(7)
  107. #define SSS_AES_KEY_CHANGE_MODE BIT(6)
  108. #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
  109. #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
  110. #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
  111. #define SSS_AES_FIFO_MODE BIT(3)
  112. #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
  113. #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
  114. #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
  115. #define SSS_AES_MODE_DECRYPT BIT(0)
  116. #define SSS_REG_AES_STATUS 0x04
  117. #define SSS_AES_BUSY BIT(2)
  118. #define SSS_AES_INPUT_READY BIT(1)
  119. #define SSS_AES_OUTPUT_READY BIT(0)
  120. #define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
  121. #define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
  122. #define SSS_REG_AES_IV_DATA(s) (0x30 + (s << 2))
  123. #define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2))
  124. #define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2))
  125. #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
  126. #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
  127. #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
  128. #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
  129. #define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
  130. SSS_AES_REG(dev, reg))
  131. /* HW engine modes */
  132. #define FLAGS_AES_DECRYPT BIT(0)
  133. #define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
  134. #define FLAGS_AES_CBC _SBF(1, 0x01)
  135. #define FLAGS_AES_CTR _SBF(1, 0x02)
  136. #define AES_KEY_LEN 16
  137. #define CRYPTO_QUEUE_LEN 1
  138. /* HASH registers */
  139. #define SSS_REG_HASH_CTRL 0x00
  140. #define SSS_HASH_USER_IV_EN BIT(5)
  141. #define SSS_HASH_INIT_BIT BIT(4)
  142. #define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
  143. #define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
  144. #define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
  145. #define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)
  146. #define SSS_REG_HASH_CTRL_PAUSE 0x04
  147. #define SSS_HASH_PAUSE BIT(0)
  148. #define SSS_REG_HASH_CTRL_FIFO 0x08
  149. #define SSS_HASH_FIFO_MODE_DMA BIT(0)
  150. #define SSS_HASH_FIFO_MODE_CPU 0
  151. #define SSS_REG_HASH_CTRL_SWAP 0x0C
  152. #define SSS_HASH_BYTESWAP_DI BIT(3)
  153. #define SSS_HASH_BYTESWAP_DO BIT(2)
  154. #define SSS_HASH_BYTESWAP_IV BIT(1)
  155. #define SSS_HASH_BYTESWAP_KEY BIT(0)
  156. #define SSS_REG_HASH_STATUS 0x10
  157. #define SSS_HASH_STATUS_MSG_DONE BIT(6)
  158. #define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
  159. #define SSS_HASH_STATUS_BUFFER_READY BIT(0)
  160. #define SSS_REG_HASH_MSG_SIZE_LOW 0x20
  161. #define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
  162. #define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
  163. #define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
  164. #define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
  165. #define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))
  166. #define HASH_BLOCK_SIZE 64
  167. #define HASH_REG_SIZEOF 4
  168. #define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
  169. #define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
  170. #define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
  171. /*
  172. * HASH bit numbers, used by the device; set in dev->hash_flags with
  173. * set_bit() and clear_bit(), tested with test_bit() or BIT(). They keep the
  174. * HASH state BUSY or FREE, or signal state from the irq_handler to the
  175. * hash_tasklet. The SGS bits track memory allocated for the scatterlist.
  176. */
  177. #define HASH_FLAGS_BUSY 0
  178. #define HASH_FLAGS_FINAL 1
  179. #define HASH_FLAGS_DMA_ACTIVE 2
  180. #define HASH_FLAGS_OUTPUT_READY 3
  181. #define HASH_FLAGS_DMA_READY 4
  182. #define HASH_FLAGS_SGS_COPIED 5
  183. #define HASH_FLAGS_SGS_ALLOCED 6
  184. /* HASH HW constants */
  185. #define BUFLEN HASH_BLOCK_SIZE
  186. #define SSS_HASH_DMA_LEN_ALIGN 8
  187. #define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
  188. #define SSS_HASH_QUEUE_LENGTH 10
  189. /**
  190. * struct samsung_aes_variant - platform specific SSS driver data
  191. * @aes_offset: AES register offset from SSS module's base.
  192. * @hash_offset: HASH register offset from SSS module's base.
  193. *
  194. * Specifies platform specific configuration of SSS module.
  195. * Note: A separate structure for driver specific platform data is used to
  196. * allow future expansion of its usage.
  197. */
  198. struct samsung_aes_variant {
  199. unsigned int aes_offset;
  200. unsigned int hash_offset;
  201. };
  202. struct s5p_aes_reqctx {
  203. unsigned long mode;
  204. };
  205. struct s5p_aes_ctx {
  206. struct s5p_aes_dev *dev;
  207. uint8_t aes_key[AES_MAX_KEY_SIZE];
  208. uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
  209. int keylen;
  210. };
  211. /**
  212. * struct s5p_aes_dev - Crypto device state container
  213. * @dev: Associated device
  214. * @clk: Clock for accessing hardware
  215. * @ioaddr: Mapped IO memory region
  216. * @aes_ioaddr: Per-variant offset for AES block IO memory
  217. * @irq_fc: Feed control interrupt line
  218. * @req: Crypto request currently handled by the device
  219. * @ctx: Configuration for currently handled crypto request
  220. * @sg_src: Scatter list with source data for currently handled block
  221. * in device. This is DMA-mapped into device.
  222. * @sg_dst: Scatter list with destination data for currently handled block
  223. * in device. This is DMA-mapped into device.
  224. * @sg_src_cpy: In case of unaligned access, copied scatter list
  225. * with source data.
  226. * @sg_dst_cpy: In case of unaligned access, copied scatter list
  227. * with destination data.
  228. * @tasklet: New request scheduling job
  229. * @queue: Crypto queue
  230. * @busy: Indicates whether the device is currently handling some request
  231. * thus it uses some of the fields from this state, like:
  232. * req, ctx, sg_src/dst (and copies). This essentially
  233. * protects against concurrent access to these fields.
  234. * @lock: Lock for protecting both access to device hardware registers
  235. * and fields related to current request (including the busy field).
  236. * @res: Resources for hash.
  237. * @io_hash_base: Per-variant offset for HASH block IO memory.
  238. * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
  239. * variable.
  240. * @hash_flags: Flags for current HASH op.
  241. * @hash_queue: Async hash queue.
  242. * @hash_tasklet: New HASH request scheduling job.
  243. * @xmit_buf: Buffer for current HASH request transfer into SSS block.
  244. * @hash_req: Current request sending to SSS HASH block.
  245. * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
  246. * @hash_sg_cnt: Counter for hash_sg_iter.
  247. *
  248. * @use_hash: true if HASH algs enabled
  249. */
  250. struct s5p_aes_dev {
  251. struct device *dev;
  252. struct clk *clk;
  253. void __iomem *ioaddr;
  254. void __iomem *aes_ioaddr;
  255. int irq_fc;
  256. struct ablkcipher_request *req;
  257. struct s5p_aes_ctx *ctx;
  258. struct scatterlist *sg_src;
  259. struct scatterlist *sg_dst;
  260. struct scatterlist *sg_src_cpy;
  261. struct scatterlist *sg_dst_cpy;
  262. struct tasklet_struct tasklet;
  263. struct crypto_queue queue;
  264. bool busy;
  265. spinlock_t lock;
  266. struct resource *res;
  267. void __iomem *io_hash_base;
  268. spinlock_t hash_lock; /* protect hash_ vars */
  269. unsigned long hash_flags;
  270. struct crypto_queue hash_queue;
  271. struct tasklet_struct hash_tasklet;
  272. u8 xmit_buf[BUFLEN];
  273. struct ahash_request *hash_req;
  274. struct scatterlist *hash_sg_iter;
  275. unsigned int hash_sg_cnt;
  276. bool use_hash;
  277. };
  278. /**
  279. * struct s5p_hash_reqctx - HASH request context
  280. * @dd: Associated device
  281. * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
  282. * @digcnt: Number of bytes processed by HW (without buffer[] ones)
  283. * @digest: Digest message or IV for partial result
  284. * @nregs: Number of HW registers for digest or IV read/write
  285. * @engine: Bits for selecting type of HASH in SSS block
  286. * @sg: sg for DMA transfer
  287. * @sg_len: Length of sg for DMA transfer
  288. * @sgl[]: sg for joining buffer and req->src scatterlist
  289. * @skip: Skip offset in req->src for current op
  290. * @total: Total number of bytes for current request
  291. * @finup: Keep state for finup or final.
  292. * @error: Keep track of error.
  293. * @bufcnt: Number of bytes held in buffer[]
  294. * @buffer[]: For byte(s) from end of req->src in UPDATE op
  295. */
  296. struct s5p_hash_reqctx {
  297. struct s5p_aes_dev *dd;
  298. bool op_update;
  299. u64 digcnt;
  300. u8 digest[SHA256_DIGEST_SIZE];
  301. unsigned int nregs; /* digest_size / sizeof(reg) */
  302. u32 engine;
  303. struct scatterlist *sg;
  304. unsigned int sg_len;
  305. struct scatterlist sgl[2];
  306. unsigned int skip;
  307. unsigned int total;
  308. bool finup;
  309. bool error;
  310. u32 bufcnt;
  311. u8 buffer[0];
  312. };
  313. /**
  314. * struct s5p_hash_ctx - HASH transformation context
  315. * @dd: Associated device
  316. * @flags: Bits for algorithm HASH.
  317. * @fallback: Software transformation for zero message or size < BUFLEN.
  318. */
  319. struct s5p_hash_ctx {
  320. struct s5p_aes_dev *dd;
  321. unsigned long flags;
  322. struct crypto_shash *fallback;
  323. };
  324. static const struct samsung_aes_variant s5p_aes_data = {
  325. .aes_offset = 0x4000,
  326. .hash_offset = 0x6000,
  327. };
  328. static const struct samsung_aes_variant exynos_aes_data = {
  329. .aes_offset = 0x200,
  330. .hash_offset = 0x400,
  331. };
  332. static const struct of_device_id s5p_sss_dt_match[] = {
  333. {
  334. .compatible = "samsung,s5pv210-secss",
  335. .data = &s5p_aes_data,
  336. },
  337. {
  338. .compatible = "samsung,exynos4210-secss",
  339. .data = &exynos_aes_data,
  340. },
  341. { },
  342. };
  343. MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
  344. static inline const struct samsung_aes_variant *find_s5p_sss_version
  345. (const struct platform_device *pdev)
  346. {
  347. if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
  348. const struct of_device_id *match;
  349. match = of_match_node(s5p_sss_dt_match,
  350. pdev->dev.of_node);
  351. return (const struct samsung_aes_variant *)match->data;
  352. }
  353. return (const struct samsung_aes_variant *)
  354. platform_get_device_id(pdev)->driver_data;
  355. }
  356. static struct s5p_aes_dev *s5p_dev;
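/*
 * Program the feed-control DMA engines with the current scatterlist entry:
 * s5p_set_dma_indata() sets the block-receive (BRDMA) channel and
 * s5p_set_dma_outdata() the block-transmit (BTDMA) one. As noted in the
 * interrupt handler below, writing the length register starts the transfer.
 */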
  357. static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
  358. const struct scatterlist *sg)
  359. {
  360. SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
  361. SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
  362. }
  363. static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
  364. const struct scatterlist *sg)
  365. {
  366. SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
  367. SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
  368. }
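/*
 * Free a bounce scatterlist previously created by s5p_make_sg_cpy():
 * release the backing pages and the scatterlist itself, then clear the
 * pointer so the free is not repeated.
 */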
  369. static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
  370. {
  371. int len;
  372. if (!*sg)
  373. return;
  374. len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
  375. free_pages((unsigned long)sg_virt(*sg), get_order(len));
  376. kfree(*sg);
  377. *sg = NULL;
  378. }
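/*
 * Copy nbytes between a linear buffer and a scatterlist using the
 * scatterwalk helpers; 'out' selects the direction (0: sg to buf,
 * 1: buf to sg), matching how the callers in this file use it.
 */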
  379. static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
  380. unsigned int nbytes, int out)
  381. {
  382. struct scatter_walk walk;
  383. if (!nbytes)
  384. return;
  385. scatterwalk_start(&walk, sg);
  386. scatterwalk_copychunks(buf, &walk, nbytes, out);
  387. scatterwalk_done(&walk, out, 0);
  388. }
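/*
 * Finish a request that used bounce buffers: if a destination copy was
 * used, copy the output data back into the caller's scatterlist, then
 * free both bounce scatterlists.
 */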
  389. static void s5p_sg_done(struct s5p_aes_dev *dev)
  390. {
  391. if (dev->sg_dst_cpy) {
  392. dev_dbg(dev->dev,
  393. "Copying %d bytes of output data back to original place\n",
  394. dev->req->nbytes);
  395. s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
  396. dev->req->nbytes, 1);
  397. }
  398. s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
  399. s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
  400. }
  401. /* Calls the completion. Cannot be called with dev->lock held. */
  402. static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
  403. {
  404. dev->req->base.complete(&dev->req->base, err);
  405. }
  406. static void s5p_unset_outdata(struct s5p_aes_dev *dev)
  407. {
  408. dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
  409. }
  410. static void s5p_unset_indata(struct s5p_aes_dev *dev)
  411. {
  412. dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
  413. }
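/*
 * Build a single-entry bounce scatterlist: allocate pages rounded up to
 * the AES block size, copy the request data from 'src' into them and
 * initialize '*dst' to point at the new buffer.
 */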
  414. static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
  415. struct scatterlist **dst)
  416. {
  417. void *pages;
  418. int len;
  419. *dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
  420. if (!*dst)
  421. return -ENOMEM;
  422. len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
  423. pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
  424. if (!pages) {
  425. kfree(*dst);
  426. *dst = NULL;
  427. return -ENOMEM;
  428. }
  429. s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
  430. sg_init_table(*dst, 1);
  431. sg_set_buf(*dst, pages, len);
  432. return 0;
  433. }
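/*
 * Map one scatterlist entry for DMA to (indata) or from (outdata) the
 * device and remember it in dev->sg_src/sg_dst; an empty entry or a
 * failed mapping is reported as an error.
 */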
  434. static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
  435. {
  436. int err;
  437. if (!sg->length) {
  438. err = -EINVAL;
  439. goto exit;
  440. }
  441. err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
  442. if (!err) {
  443. err = -ENOMEM;
  444. goto exit;
  445. }
  446. dev->sg_dst = sg;
  447. err = 0;
  448. exit:
  449. return err;
  450. }
  451. static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
  452. {
  453. int err;
  454. if (!sg->length) {
  455. err = -EINVAL;
  456. goto exit;
  457. }
  458. err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
  459. if (!err) {
  460. err = -ENOMEM;
  461. goto exit;
  462. }
  463. dev->sg_src = sg;
  464. err = 0;
  465. exit:
  466. return err;
  467. }
  468. /*
  469. * Returns -ERRNO on error (mapping of new data failed).
  470. * On success returns:
  471. * - 0 if there is no more data,
  472. * - 1 if new transmitting (output) data is ready and its address+length
  473. * have to be written to device (by calling s5p_set_dma_outdata()).
  474. */
  475. static int s5p_aes_tx(struct s5p_aes_dev *dev)
  476. {
  477. int ret = 0;
  478. s5p_unset_outdata(dev);
  479. if (!sg_is_last(dev->sg_dst)) {
  480. ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
  481. if (!ret)
  482. ret = 1;
  483. }
  484. return ret;
  485. }
  486. /*
  487. * Returns -ERRNO on error (mapping of new data failed).
  488. * On success returns:
  489. * - 0 if there is no more data,
  490. * - 1 if new receiving (input) data is ready and its address+length
  491. * have to be written to device (by calling s5p_set_dma_indata()).
  492. */
  493. static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
  494. {
  495. int ret = 0;
  496. s5p_unset_indata(dev);
  497. if (!sg_is_last(dev->sg_src)) {
  498. ret = s5p_set_indata(dev, sg_next(dev->sg_src));
  499. if (!ret)
  500. ret = 1;
  501. }
  502. return ret;
  503. }
  504. static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
  505. {
  506. return __raw_readl(dd->io_hash_base + offset);
  507. }
  508. static inline void s5p_hash_write(struct s5p_aes_dev *dd,
  509. u32 offset, u32 value)
  510. {
  511. __raw_writel(value, dd->io_hash_base + offset);
  512. }
  513. /**
  514. * s5p_set_dma_hashdata() - start DMA with sg
  515. * @dev: device
  516. * @sg: scatterlist ready to DMA transmit
  517. */
  518. static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
  519. const struct scatterlist *sg)
  520. {
  521. dev->hash_sg_cnt--;
  522. SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
  523. SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
  524. }
  525. /**
  526. * s5p_hash_rx() - get next hash_sg_iter
  527. * @dev: device
  528. *
  529. * Return:
  530. * 2 if there is no more data and it is an UPDATE op
  531. * 1 if new receiving (input) data is ready and can be written to the device
  532. * 0 if there is no more data and it is a FINAL op
  533. */
  534. static int s5p_hash_rx(struct s5p_aes_dev *dev)
  535. {
  536. if (dev->hash_sg_cnt > 0) {
  537. dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
  538. return 1;
  539. }
  540. set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
  541. if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
  542. return 0;
  543. return 2;
  544. }
  545. static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
  546. {
  547. struct platform_device *pdev = dev_id;
  548. struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
  549. int err_dma_tx = 0;
  550. int err_dma_rx = 0;
  551. int err_dma_hx = 0;
  552. bool tx_end = false;
  553. bool hx_end = false;
  554. unsigned long flags;
  555. uint32_t status;
  556. u32 st_bits;
  557. int err;
  558. spin_lock_irqsave(&dev->lock, flags);
  559. /*
  560. * Handle rx or tx interrupt. If there is still data (scatterlist did not
  561. * reach end), then map next scatterlist entry.
  562. * In case of such mapping error, s5p_aes_complete() should be called.
  563. *
  564. * If there is no more data in tx scatter list, call s5p_aes_complete()
  565. * and schedule new tasklet.
  566. *
  567. * Handle hx interrupt. If there is still data map next entry.
  568. */
  569. status = SSS_READ(dev, FCINTSTAT);
  570. if (status & SSS_FCINTSTAT_BRDMAINT)
  571. err_dma_rx = s5p_aes_rx(dev);
  572. if (status & SSS_FCINTSTAT_BTDMAINT) {
  573. if (sg_is_last(dev->sg_dst))
  574. tx_end = true;
  575. err_dma_tx = s5p_aes_tx(dev);
  576. }
  577. if (status & SSS_FCINTSTAT_HRDMAINT)
  578. err_dma_hx = s5p_hash_rx(dev);
  579. st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
  580. SSS_FCINTSTAT_HRDMAINT);
  581. /* clear DMA bits */
  582. SSS_WRITE(dev, FCINTPEND, st_bits);
  583. /* clear HASH irq bits */
  584. if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
  585. /* cannot have both HPART and HDONE */
  586. if (status & SSS_FCINTSTAT_HPARTINT)
  587. st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
  588. if (status & SSS_FCINTSTAT_HDONEINT)
  589. st_bits = SSS_HASH_STATUS_MSG_DONE;
  590. set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
  591. s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
  592. hx_end = true;
  593. /* when DONE or PART, do not handle HASH DMA */
  594. err_dma_hx = 0;
  595. }
  596. if (err_dma_rx < 0) {
  597. err = err_dma_rx;
  598. goto error;
  599. }
  600. if (err_dma_tx < 0) {
  601. err = err_dma_tx;
  602. goto error;
  603. }
  604. if (tx_end) {
  605. s5p_sg_done(dev);
  606. if (err_dma_hx == 1)
  607. s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
  608. spin_unlock_irqrestore(&dev->lock, flags);
  609. s5p_aes_complete(dev, 0);
  610. /* Device is still busy */
  611. tasklet_schedule(&dev->tasklet);
  612. } else {
  613. /*
  614. * Writing length of DMA block (either receiving or
  615. * transmitting) will start the operation immediately, so this
  616. * should be done at the end (even after clearing pending
  617. * interrupts to not miss the interrupt).
  618. */
  619. if (err_dma_tx == 1)
  620. s5p_set_dma_outdata(dev, dev->sg_dst);
  621. if (err_dma_rx == 1)
  622. s5p_set_dma_indata(dev, dev->sg_src);
  623. if (err_dma_hx == 1)
  624. s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
  625. spin_unlock_irqrestore(&dev->lock, flags);
  626. }
  627. goto hash_irq_end;
  628. error:
  629. s5p_sg_done(dev);
  630. dev->busy = false;
  631. if (err_dma_hx == 1)
  632. s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
  633. spin_unlock_irqrestore(&dev->lock, flags);
  634. s5p_aes_complete(dev, err);
  635. hash_irq_end:
  636. /*
  637. * Note about else if:
  638. * when hash_sg_iter reaches the end and it is an UPDATE op,
  639. * issue SSS_HASH_PAUSE and wait for HPART irq
  640. */
  641. if (hx_end)
  642. tasklet_schedule(&dev->hash_tasklet);
  643. else if (err_dma_hx == 2)
  644. s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
  645. SSS_HASH_PAUSE);
  646. return IRQ_HANDLED;
  647. }
  648. /**
  649. * s5p_hash_read_msg() - read message or IV from HW
  650. * @req: AHASH request
  651. */
  652. static void s5p_hash_read_msg(struct ahash_request *req)
  653. {
  654. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  655. struct s5p_aes_dev *dd = ctx->dd;
  656. u32 *hash = (u32 *)ctx->digest;
  657. unsigned int i;
  658. for (i = 0; i < ctx->nregs; i++)
  659. hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
  660. }
  661. /**
  662. * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
  663. * @dd: device
  664. * @ctx: request context
  665. */
  666. static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
  667. const struct s5p_hash_reqctx *ctx)
  668. {
  669. const u32 *hash = (const u32 *)ctx->digest;
  670. unsigned int i;
  671. for (i = 0; i < ctx->nregs; i++)
  672. s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
  673. }
  674. /**
  675. * s5p_hash_write_iv() - write IV for next partial/finup op.
  676. * @req: AHASH request
  677. */
  678. static void s5p_hash_write_iv(struct ahash_request *req)
  679. {
  680. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  681. s5p_hash_write_ctx_iv(ctx->dd, ctx);
  682. }
  683. /**
  684. * s5p_hash_copy_result() - copy digest into req->result
  685. * @req: AHASH request
  686. */
  687. static void s5p_hash_copy_result(struct ahash_request *req)
  688. {
  689. const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  690. if (!req->result)
  691. return;
  692. memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
  693. }
  694. /**
  695. * s5p_hash_dma_flush() - flush HASH DMA
  696. * @dev: secss device
  697. */
  698. static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
  699. {
  700. SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
  701. }
  702. /**
  703. * s5p_hash_dma_enable() - enable DMA mode for HASH
  704. * @dev: secss device
  705. *
  706. * enable DMA mode for HASH
  707. */
  708. static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
  709. {
  710. s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
  711. }
  712. /**
  713. * s5p_hash_irq_disable() - disable irq HASH signals
  714. * @dev: secss device
  715. * @flags: bitfield with irq's to be disabled
  716. */
  717. static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
  718. {
  719. SSS_WRITE(dev, FCINTENCLR, flags);
  720. }
  721. /**
  722. * s5p_hash_irq_enable() - enable irq signals
  723. * @dev: secss device
  724. * @flags: bitfield with irq's to be enabled
  725. */
  726. static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
  727. {
  728. SSS_WRITE(dev, FCINTENSET, flags);
  729. }
  730. /**
  731. * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
  732. * @dev: secss device
  733. * @hashflow: HASH stream flow with/without crypto AES/DES
  734. */
  735. static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
  736. {
  737. unsigned long flags;
  738. u32 flow;
  739. spin_lock_irqsave(&dev->lock, flags);
  740. flow = SSS_READ(dev, FCFIFOCTRL);
  741. flow &= ~SSS_HASHIN_MASK;
  742. flow |= hashflow;
  743. SSS_WRITE(dev, FCFIFOCTRL, flow);
  744. spin_unlock_irqrestore(&dev->lock, flags);
  745. }
  746. /**
  747. * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
  748. * @dev: secss device
  749. * @hashflow: HASH stream flow with/without AES/DES
  750. *
  751. * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
  752. * enable HASH irq's HRDMA, HDONE, HPART
  753. */
  754. static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
  755. {
  756. s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
  757. SSS_FCINTENCLR_HDONEINTENCLR |
  758. SSS_FCINTENCLR_HPARTINTENCLR);
  759. s5p_hash_dma_flush(dev);
  760. s5p_hash_dma_enable(dev);
  761. s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
  762. s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
  763. SSS_FCINTENSET_HDONEINTENSET |
  764. SSS_FCINTENSET_HPARTINTENSET);
  765. }
  766. /**
  767. * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
  768. * @dd: secss device
  769. * @length: length for request
  770. * @final: true if final op
  771. *
  772. * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
  773. * after previous updates, fill up IV words. For final, calculate and set
  774. * lengths for HASH so SecSS can finalize hash. For partial, set SSS HASH
  775. * length to 2^63 so it will never be reached, and set prelow and prehigh
  776. * to zero.
  777. *
  778. * This function does not start DMA transfer.
  779. */
  780. static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
  781. bool final)
  782. {
  783. struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
  784. u32 prelow, prehigh, low, high;
  785. u32 configflags, swapflags;
  786. u64 tmplen;
  787. configflags = ctx->engine | SSS_HASH_INIT_BIT;
  788. if (likely(ctx->digcnt)) {
  789. s5p_hash_write_ctx_iv(dd, ctx);
  790. configflags |= SSS_HASH_USER_IV_EN;
  791. }
  792. if (final) {
  793. /* number of bytes for last part */
  794. low = length;
  795. high = 0;
  796. /* total number of bits prev hashed */
  797. tmplen = ctx->digcnt * 8;
  798. prelow = (u32)tmplen;
  799. prehigh = (u32)(tmplen >> 32);
  800. } else {
  801. prelow = 0;
  802. prehigh = 0;
  803. low = 0;
  804. high = BIT(31);
  805. }
  806. swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
  807. SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
  808. s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
  809. s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
  810. s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
  811. s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
  812. s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
  813. s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
  814. }
  815. /**
  816. * s5p_hash_xmit_dma() - start DMA hash processing
  817. * @dd: secss device
  818. * @length: length for request
  819. * @final: true if final op
  820. *
  821. * Update digcnt here, as it is needed for finup/final op.
  822. */
  823. static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
  824. bool final)
  825. {
  826. struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
  827. unsigned int cnt;
  828. cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
  829. if (!cnt) {
  830. dev_err(dd->dev, "dma_map_sg error\n");
  831. ctx->error = true;
  832. return -EINVAL;
  833. }
  834. set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
  835. dd->hash_sg_iter = ctx->sg;
  836. dd->hash_sg_cnt = cnt;
  837. s5p_hash_write_ctrl(dd, length, final);
  838. ctx->digcnt += length;
  839. ctx->total -= length;
  840. /* catch last interrupt */
  841. if (final)
  842. set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
  843. s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
  844. return -EINPROGRESS;
  845. }
  846. /**
  847. * s5p_hash_copy_sgs() - copy request's bytes into new buffer
  848. * @ctx: request context
  849. * @sg: source scatterlist request
  850. * @new_len: number of bytes to process from sg
  851. *
  852. * Allocate a new buffer and copy the data for HASH into it. If xmit_buf was
  853. * filled, copy it first, then copy the data from sg. Prepare a single sgl[0]
  854. * entry with the allocated buffer.
  855. *
  856. * Set a bit in dd->hash_flags so the buffer can be freed after irq processing.
  857. */
  858. static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
  859. struct scatterlist *sg, unsigned int new_len)
  860. {
  861. unsigned int pages, len;
  862. void *buf;
  863. len = new_len + ctx->bufcnt;
  864. pages = get_order(len);
  865. buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
  866. if (!buf) {
  867. dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
  868. ctx->error = true;
  869. return -ENOMEM;
  870. }
  871. if (ctx->bufcnt)
  872. memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
  873. scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
  874. new_len, 0);
  875. sg_init_table(ctx->sgl, 1);
  876. sg_set_buf(ctx->sgl, buf, len);
  877. ctx->sg = ctx->sgl;
  878. ctx->sg_len = 1;
  879. ctx->bufcnt = 0;
  880. ctx->skip = 0;
  881. set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
  882. return 0;
  883. }
  884. /**
  885. * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
  886. * @ctx: request context
  887. * @sg: source scatterlist request
  888. * @new_len: number of bytes to process from sg
  889. *
  890. * Allocate new scatterlist table, copy data for HASH into it. If there was
  891. * xmit_buf filled, prepare it first, then copy page, length and offset from
  892. * source sg into it, adjusting begin and/or end for skip offset and
  893. * hash_later value.
  894. *
  895. * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
  896. * it after irq ends processing.
  897. */
  898. static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
  899. struct scatterlist *sg, unsigned int new_len)
  900. {
  901. unsigned int skip = ctx->skip, n = sg_nents(sg);
  902. struct scatterlist *tmp;
  903. unsigned int len;
  904. if (ctx->bufcnt)
  905. n++;
  906. ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
  907. if (!ctx->sg) {
  908. ctx->error = true;
  909. return -ENOMEM;
  910. }
  911. sg_init_table(ctx->sg, n);
  912. tmp = ctx->sg;
  913. ctx->sg_len = 0;
  914. if (ctx->bufcnt) {
  915. sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
  916. tmp = sg_next(tmp);
  917. ctx->sg_len++;
  918. }
  919. while (sg && skip >= sg->length) {
  920. skip -= sg->length;
  921. sg = sg_next(sg);
  922. }
  923. while (sg && new_len) {
  924. len = sg->length - skip;
  925. if (new_len < len)
  926. len = new_len;
  927. new_len -= len;
  928. sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
  929. skip = 0;
  930. if (new_len <= 0)
  931. sg_mark_end(tmp);
  932. tmp = sg_next(tmp);
  933. ctx->sg_len++;
  934. sg = sg_next(sg);
  935. }
  936. set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
  937. return 0;
  938. }
  939. /**
  940. * s5p_hash_prepare_sgs() - prepare sg for processing
  941. * @ctx: request context
  942. * @sg: source scatterlist request
  943. * @nbytes: number of bytes to process from sg
  944. * @final: final flag
  945. *
  946. * Check two conditions: (1) the buffers in sg contain length-aligned data,
  947. * and (2) the sg table has well-aligned elements (list_ok). If either check
  948. * fails, then either (1) allocate a new buffer with s5p_hash_copy_sgs(), copy
  949. * the data into it and prepare the request in sgl, or (2) allocate a new sg
  950. * table and prepare its elements.
  951. *
  952. * For digest or finup all conditions may already hold, in which case no
  953. * fixes are needed.
  954. */
  955. static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
  956. struct scatterlist *sg,
  957. unsigned int new_len, bool final)
  958. {
  959. unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
  960. bool aligned = true, list_ok = true;
  961. struct scatterlist *sg_tmp = sg;
  962. if (!sg || !sg->length || !new_len)
  963. return 0;
  964. if (skip || !final)
  965. list_ok = false;
  966. while (nbytes > 0 && sg_tmp) {
  967. n++;
  968. if (skip >= sg_tmp->length) {
  969. skip -= sg_tmp->length;
  970. if (!sg_tmp->length) {
  971. aligned = false;
  972. break;
  973. }
  974. } else {
  975. if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
  976. aligned = false;
  977. break;
  978. }
  979. if (nbytes < sg_tmp->length - skip) {
  980. list_ok = false;
  981. break;
  982. }
  983. nbytes -= sg_tmp->length - skip;
  984. skip = 0;
  985. }
  986. sg_tmp = sg_next(sg_tmp);
  987. }
  988. if (!aligned)
  989. return s5p_hash_copy_sgs(ctx, sg, new_len);
  990. else if (!list_ok)
  991. return s5p_hash_copy_sg_lists(ctx, sg, new_len);
  992. /*
  993. * Have aligned data from previous operation and/or current
  994. * Note: will enter here only if (digest or finup) and aligned
  995. */
  996. if (ctx->bufcnt) {
  997. ctx->sg_len = n;
  998. sg_init_table(ctx->sgl, 2);
  999. sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
  1000. sg_chain(ctx->sgl, 2, sg);
  1001. ctx->sg = ctx->sgl;
  1002. ctx->sg_len++;
  1003. } else {
  1004. ctx->sg = sg;
  1005. ctx->sg_len = n;
  1006. }
  1007. return 0;
  1008. }
  1009. /**
  1010. * s5p_hash_prepare_request() - prepare request for processing
  1011. * @req: AHASH request
  1012. * @update: true if UPDATE op
  1013. *
  1014. * Note 1: we can have update flag _and_ final flag at the same time.
  1015. * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE) or
  1016. * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or
  1017. * we have final op
  1018. */
  1019. static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
  1020. {
  1021. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1022. bool final = ctx->finup;
  1023. int xmit_len, hash_later, nbytes;
  1024. int ret;
  1025. if (update)
  1026. nbytes = req->nbytes;
  1027. else
  1028. nbytes = 0;
  1029. ctx->total = nbytes + ctx->bufcnt;
  1030. if (!ctx->total)
  1031. return 0;
  1032. if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
  1033. /* bytes left from previous request, so fill up to BUFLEN */
  1034. int len = BUFLEN - ctx->bufcnt % BUFLEN;
  1035. if (len > nbytes)
  1036. len = nbytes;
  1037. scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
  1038. 0, len, 0);
  1039. ctx->bufcnt += len;
  1040. nbytes -= len;
  1041. ctx->skip = len;
  1042. } else {
  1043. ctx->skip = 0;
  1044. }
  1045. if (ctx->bufcnt)
  1046. memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
  1047. xmit_len = ctx->total;
  1048. if (final) {
  1049. hash_later = 0;
  1050. } else {
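/* Not final: keep the trailing partial block (or one full BUFLEN block if
 * the total is already aligned) back in the buffer; transmit only a
 * BUFLEN-aligned amount now and hash the rest in a later op. */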
  1051. if (IS_ALIGNED(xmit_len, BUFLEN))
  1052. xmit_len -= BUFLEN;
  1053. else
  1054. xmit_len -= xmit_len & (BUFLEN - 1);
  1055. hash_later = ctx->total - xmit_len;
  1056. /* copy hash_later bytes from end of req->src */
  1057. /* previous bytes are in xmit_buf, so no overwrite */
  1058. scatterwalk_map_and_copy(ctx->buffer, req->src,
  1059. req->nbytes - hash_later,
  1060. hash_later, 0);
  1061. }
  1062. if (xmit_len > BUFLEN) {
  1063. ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
  1064. final);
  1065. if (ret)
  1066. return ret;
  1067. } else {
  1068. /* have buffered data only */
  1069. if (unlikely(!ctx->bufcnt)) {
  1070. /* first update didn't fill up buffer */
  1071. scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
  1072. 0, xmit_len, 0);
  1073. }
  1074. sg_init_table(ctx->sgl, 1);
  1075. sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
  1076. ctx->sg = ctx->sgl;
  1077. ctx->sg_len = 1;
  1078. }
  1079. ctx->bufcnt = hash_later;
  1080. if (!final)
  1081. ctx->total = xmit_len;
  1082. return 0;
  1083. }
  1084. /**
  1085. * s5p_hash_update_dma_stop() - unmap DMA
  1086. * @dd: secss device
  1087. *
  1088. * Unmap scatterlist ctx->sg.
  1089. */
  1090. static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
  1091. {
  1092. const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
  1093. dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
  1094. clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
  1095. }
  1096. /**
  1097. * s5p_hash_finish() - copy calculated digest to crypto layer
  1098. * @req: AHASH request
  1099. */
  1100. static void s5p_hash_finish(struct ahash_request *req)
  1101. {
  1102. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1103. struct s5p_aes_dev *dd = ctx->dd;
  1104. if (ctx->digcnt)
  1105. s5p_hash_copy_result(req);
  1106. dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
  1107. }
  1108. /**
  1109. * s5p_hash_finish_req() - finish request
  1110. * @req: AHASH request
  1111. * @err: error
  1112. */
  1113. static void s5p_hash_finish_req(struct ahash_request *req, int err)
  1114. {
  1115. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1116. struct s5p_aes_dev *dd = ctx->dd;
  1117. unsigned long flags;
  1118. if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
  1119. free_pages((unsigned long)sg_virt(ctx->sg),
  1120. get_order(ctx->sg->length));
  1121. if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
  1122. kfree(ctx->sg);
  1123. ctx->sg = NULL;
  1124. dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
  1125. BIT(HASH_FLAGS_SGS_COPIED));
  1126. if (!err && !ctx->error) {
  1127. s5p_hash_read_msg(req);
  1128. if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
  1129. s5p_hash_finish(req);
  1130. } else {
  1131. ctx->error = true;
  1132. }
  1133. spin_lock_irqsave(&dd->hash_lock, flags);
  1134. dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
  1135. BIT(HASH_FLAGS_DMA_READY) |
  1136. BIT(HASH_FLAGS_OUTPUT_READY));
  1137. spin_unlock_irqrestore(&dd->hash_lock, flags);
  1138. if (req->base.complete)
  1139. req->base.complete(&req->base, err);
  1140. }
  1141. /**
  1142. * s5p_hash_handle_queue() - handle hash queue
  1143. * @dd: device s5p_aes_dev
  1144. * @req: AHASH request
  1145. *
  1146. * If req != NULL, enqueue it on dd->hash_queue; if HASH_FLAGS_BUSY is not set
  1147. * on the device, then process the first request from dd->hash_queue.
  1148. *
  1149. * Returns: see s5p_hash_final below.
  1150. */
  1151. static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
  1152. struct ahash_request *req)
  1153. {
  1154. struct crypto_async_request *async_req, *backlog;
  1155. struct s5p_hash_reqctx *ctx;
  1156. unsigned long flags;
  1157. int err = 0, ret = 0;
  1158. retry:
  1159. spin_lock_irqsave(&dd->hash_lock, flags);
  1160. if (req)
  1161. ret = ahash_enqueue_request(&dd->hash_queue, req);
  1162. if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
  1163. spin_unlock_irqrestore(&dd->hash_lock, flags);
  1164. return ret;
  1165. }
  1166. backlog = crypto_get_backlog(&dd->hash_queue);
  1167. async_req = crypto_dequeue_request(&dd->hash_queue);
  1168. if (async_req)
  1169. set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
  1170. spin_unlock_irqrestore(&dd->hash_lock, flags);
  1171. if (!async_req)
  1172. return ret;
  1173. if (backlog)
  1174. backlog->complete(backlog, -EINPROGRESS);
  1175. req = ahash_request_cast(async_req);
  1176. dd->hash_req = req;
  1177. ctx = ahash_request_ctx(req);
  1178. err = s5p_hash_prepare_request(req, ctx->op_update);
  1179. if (err || !ctx->total)
  1180. goto out;
  1181. dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
  1182. ctx->op_update, req->nbytes);
  1183. s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
  1184. if (ctx->digcnt)
  1185. s5p_hash_write_iv(req); /* restore hash IV */
  1186. if (ctx->op_update) { /* HASH_OP_UPDATE */
  1187. err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
  1188. if (err != -EINPROGRESS && ctx->finup && !ctx->error)
  1189. /* no final() after finup() */
  1190. err = s5p_hash_xmit_dma(dd, ctx->total, true);
  1191. } else { /* HASH_OP_FINAL */
  1192. err = s5p_hash_xmit_dma(dd, ctx->total, true);
  1193. }
  1194. out:
  1195. if (err != -EINPROGRESS) {
  1196. /* hash_tasklet_cb will not finish it, so do it here */
  1197. s5p_hash_finish_req(req, err);
  1198. req = NULL;
  1199. /*
  1200. * Execute next request immediately if there is anything
  1201. * in queue.
  1202. */
  1203. goto retry;
  1204. }
  1205. return ret;
  1206. }
  1207. /**
  1208. * s5p_hash_tasklet_cb() - hash tasklet
  1209. * @data: ptr to s5p_aes_dev
  1210. */
  1211. static void s5p_hash_tasklet_cb(unsigned long data)
  1212. {
  1213. struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
  1214. if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
  1215. s5p_hash_handle_queue(dd, NULL);
  1216. return;
  1217. }
  1218. if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
  1219. if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
  1220. &dd->hash_flags)) {
  1221. s5p_hash_update_dma_stop(dd);
  1222. }
  1223. if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
  1224. &dd->hash_flags)) {
  1225. /* hash or semi-hash ready */
  1226. clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
  1227. goto finish;
  1228. }
  1229. }
  1230. return;
  1231. finish:
  1232. /* finish current request */
  1233. s5p_hash_finish_req(dd->hash_req, 0);
  1234. /* If we are not busy, process next req */
  1235. if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
  1236. s5p_hash_handle_queue(dd, NULL);
  1237. }
  1238. /**
  1239. * s5p_hash_enqueue() - enqueue request
  1240. * @req: AHASH request
  1241. * @op: operation UPDATE (true) or FINAL (false)
  1242. *
  1243. * Returns: see s5p_hash_final below.
  1244. */
  1245. static int s5p_hash_enqueue(struct ahash_request *req, bool op)
  1246. {
  1247. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1248. struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  1249. ctx->op_update = op;
  1250. return s5p_hash_handle_queue(tctx->dd, req);
  1251. }
  1252. /**
  1253. * s5p_hash_update() - process the hash input data
  1254. * @req: AHASH request
  1255. *
  1256. * If the request fits in the buffer, copy it and return immediately;
  1257. * otherwise enqueue it with OP_UPDATE.
  1258. *
  1259. * Returns: see s5p_hash_final below.
  1260. */
  1261. static int s5p_hash_update(struct ahash_request *req)
  1262. {
  1263. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1264. if (!req->nbytes)
  1265. return 0;
  1266. if (ctx->bufcnt + req->nbytes <= BUFLEN) {
  1267. scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
  1268. 0, req->nbytes, 0);
  1269. ctx->bufcnt += req->nbytes;
  1270. return 0;
  1271. }
  1272. return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
  1273. }
  1274. /**
  1275. * s5p_hash_shash_digest() - calculate shash digest
  1276. * @tfm: crypto transformation
  1277. * @flags: tfm flags
  1278. * @data: input data
  1279. * @len: length of data
  1280. * @out: output buffer
  1281. */
  1282. static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
  1283. const u8 *data, unsigned int len, u8 *out)
  1284. {
  1285. SHASH_DESC_ON_STACK(shash, tfm);
  1286. shash->tfm = tfm;
  1287. shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
  1288. return crypto_shash_digest(shash, data, len, out);
  1289. }
  1290. /**
  1291. * s5p_hash_final_shash() - calculate shash digest
  1292. * @req: AHASH request
  1293. */
  1294. static int s5p_hash_final_shash(struct ahash_request *req)
  1295. {
  1296. struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  1297. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1298. return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
  1299. ctx->buffer, ctx->bufcnt, req->result);
  1300. }
  1301. /**
  1302. * s5p_hash_final() - close up hash and calculate digest
  1303. * @req: AHASH request
  1304. *
  1305. * Note: in the final op req->src does not contain any data, while req->nbytes
  1306. * can be non-zero.
  1307. *
  1308. * If there were no input data processed yet and the buffered hash data is
  1309. * less than BUFLEN (64) then calculate the final hash immediately by using
  1310. * SW algorithm fallback.
  1311. *
  1312. * Otherwise enqueue the current AHASH request with the OP_FINAL operation
  1313. * and finalize the hash message in HW. Note that if digcnt != 0 then there was
  1314. * a previous update op, so there are always some buffered bytes in ctx->buffer,
  1315. * which means that ctx->bufcnt != 0.
  1316. *
  1317. * Returns:
  1318. * 0 if the request has been processed immediately,
  1319. * -EINPROGRESS if the operation has been queued for later execution or is set
  1320. * to processing by HW,
  1321. * -EBUSY if queue is full and request should be resubmitted later,
  1322. * other negative values denote an error.
  1323. */
  1324. static int s5p_hash_final(struct ahash_request *req)
  1325. {
  1326. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1327. ctx->finup = true;
  1328. if (ctx->error)
  1329. return -EINVAL; /* uncompleted hash is not needed */
  1330. if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
  1331. return s5p_hash_final_shash(req);
  1332. return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
  1333. }
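/*
 * Minimal usage sketch (illustrative only, not part of this driver): a
 * kernel client reaches the update/final pair above through the generic
 * ahash API; data, len and digest below are caller-provided placeholders:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_init(req), &wait);
 *	if (!err)
 *		err = crypto_wait_req(crypto_ahash_update(req), &wait);
 *	if (!err)
 *		err = crypto_wait_req(crypto_ahash_final(req), &wait);
 *
 * Allocation error handling and freeing of tfm/req are omitted for brevity.
 */
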
/**
 * s5p_hash_finup() - process last req->src and calculate digest
 * @req: AHASH request containing the last update data
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_finup(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->finup = true;

	err1 = s5p_hash_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be called even if update() failed, to clean up
	 * resources, except when update() returned -EINPROGRESS/-EBUSY above
	 * or when final() itself calculates the digest for a small buffered
	 * size.
	 */
	err2 = s5p_hash_final(req);

	return err1 ?: err2;
}

/**
 * s5p_hash_init() - initialize AHASH request context
 * @req: AHASH request
 *
 * Init async hash request context.
 */
static int s5p_hash_init(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);

	ctx->dd = tctx->dd;
	ctx->error = false;
	ctx->finup = false;
	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->skip = 0;

	dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_MD5;
		ctx->nregs = HASH_MD5_MAX_REG;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA1;
		ctx->nregs = HASH_SHA1_MAX_REG;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA256;
		ctx->nregs = HASH_SHA256_MAX_REG;
		break;
	default:
		ctx->error = true;
		return -EINVAL;
	}

	return 0;
}

/**
 * s5p_hash_digest - calculate digest from req->src
 * @req: AHASH request
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_digest(struct ahash_request *req)
{
	return s5p_hash_init(req) ?: s5p_hash_finup(req);
}

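/*
 * Note (added for clarity, not in the original sources): the one-shot
 * .digest entry point is simply init followed by finup, so a caller using
 * crypto_ahash_digest() gets the same buffering and HW/SW-fallback decisions
 * as the explicit init/update/final sequence.
 */
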
/**
 * s5p_hash_cra_init_alg - init crypto alg transformation
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	tctx->dd = s5p_dev;
	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("fallback alloc fails for '%s'\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct s5p_hash_reqctx) + BUFLEN);

	return 0;
}

/**
 * s5p_hash_cra_init - init crypto tfm
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init(struct crypto_tfm *tfm)
{
	return s5p_hash_cra_init_alg(tfm);
}

/**
 * s5p_hash_cra_exit - exit crypto tfm
 * @tfm: crypto transformation
 *
 * Free the allocated fallback.
 */
static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;
}

/**
 * s5p_hash_export - export hash state
 * @req: AHASH request
 * @out: buffer for exported state
 */
static int s5p_hash_export(struct ahash_request *req, void *out)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);

	return 0;
}

/**
 * s5p_hash_import - import hash state
 * @req: AHASH request
 * @in: buffer with state to be imported from
 */
static int s5p_hash_import(struct ahash_request *req, const void *in)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	const struct s5p_hash_reqctx *ctx_in = in;

	memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
	if (ctx_in->bufcnt > BUFLEN) {
		ctx->error = true;
		return -EINVAL;
	}

	ctx->dd = tctx->dd;
	ctx->error = false;

	return 0;
}

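/*
 * Note (added for clarity): the exported state is the request context plus
 * the partial-data buffer, matching .halg.statesize below
 * (sizeof(struct s5p_hash_reqctx) + BUFLEN). Import copies the full BUFLEN
 * worth of buffer and rejects a state whose bufcnt claims more buffered
 * bytes than can fit.
 */
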
static struct ahash_alg algs_sha1_md5_sha256[] = {
{
	.init		= s5p_hash_init,
	.update		= s5p_hash_update,
	.final		= s5p_hash_final,
	.finup		= s5p_hash_finup,
	.digest		= s5p_hash_digest,
	.export		= s5p_hash_export,
	.import		= s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "exynos-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= HASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_hash_cra_init,
		.cra_exit		= s5p_hash_cra_exit,
	}
},
{
	.init		= s5p_hash_init,
	.update		= s5p_hash_update,
	.final		= s5p_hash_final,
	.finup		= s5p_hash_finup,
	.digest		= s5p_hash_digest,
	.export		= s5p_hash_export,
	.import		= s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "exynos-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= HASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_hash_cra_init,
		.cra_exit		= s5p_hash_cra_exit,
	}
},
{
	.init		= s5p_hash_init,
	.update		= s5p_hash_update,
	.final		= s5p_hash_final,
	.finup		= s5p_hash_finup,
	.digest		= s5p_hash_digest,
	.export		= s5p_hash_export,
	.import		= s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "exynos-sha256",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= HASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_hash_cra_init,
		.cra_exit		= s5p_hash_cra_exit,
	}
}
};

static void s5p_set_aes(struct s5p_aes_dev *dev,
			const uint8_t *key, const uint8_t *iv,
			unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}

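/*
 * Note (added for clarity): the key register bank is sized for AES-256
 * (eight 32-bit words), and shorter keys are written at a higher starting
 * word (offset 2 for 192-bit, offset 4 for 128-bit) so that the key always
 * ends at the last key data register.
 */
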
static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}

static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}

static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}

static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct ablkcipher_request *req = dev->req;
	uint32_t aes_control;
	unsigned long flags;
	int err;
	u8 *iv;

	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
		iv = req->info;
	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
		aes_control |= SSS_AES_CHAIN_MODE_CTR;
		iv = req->info;
	} else {
		iv = NULL; /* AES_ECB */
	}

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* as a variant it is possible to use byte swapping on DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);

	s5p_set_dma_indata(dev, dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(dev, err);
}

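/*
 * Note (added for clarity): feed-control interrupts are masked via
 * FCINTENCLR while the scatterlists, key and IV are programmed under
 * dev->lock, and are re-enabled with FCINTENSET only once both DMA
 * directions have been set up. The error labels unwind in reverse order of
 * setup before completing the request with the error code.
 */
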
static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog   = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx   = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto exit;
	}
	dev->busy = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

exit:
	return err;
}

static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
			  const uint8_t *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

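/*
 * Note (added for clarity): setkey only validates the length and caches the
 * key in the tfm context; the key material is programmed into the hardware
 * by s5p_set_aes() each time s5p_aes_crypt_start() runs a request.
 */
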
static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = s5p_dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

	return 0;
}

static struct crypto_alg algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_ecb_encrypt,
			.decrypt	= s5p_aes_ecb_decrypt,
		}
	},
	{
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_cbc_encrypt,
			.decrypt	= s5p_aes_cbc_decrypt,
		}
	},
};

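/*
 * Minimal usage sketch (illustrative only, not part of this driver): an
 * in-kernel user would typically reach "cbc-aes-s5p" through the generic
 * symmetric-key cipher API; key, iv and buf below are caller-provided
 * placeholders:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	int err;
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * The buffer length must be a multiple of AES_BLOCK_SIZE, as enforced in
 * s5p_aes_crypt() above; allocation error handling and freeing of tfm/req
 * are omitted.
 */
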
static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err = -ENODEV;
	const struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;
	unsigned int hash_i;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	variant = find_s5p_sss_version(pdev);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/*
	 * Note: HASH and PRNG use the same registers in secss; avoid having
	 * them overwrite each other. This will drop HASH when
	 * CONFIG_EXYNOS_RNG is enabled in the config. We need a larger
	 * resource size for the HASH registers in secss; the current one
	 * describes only AES/DES.
	 */
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
		if (variant == &exynos_aes_data) {
			res->end += 0x300;
			pdata->use_hash = true;
		}
	}

	pdata->res = res;
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr)) {
		if (!pdata->use_hash)
			return PTR_ERR(pdata->ioaddr);
		/* try AES without HASH */
		res->end -= 0x300;
		pdata->use_hash = false;
		pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(pdata->ioaddr))
			return PTR_ERR(pdata->ioaddr);
	}

	pdata->clk = devm_clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
		return err;
	}

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->hash_lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	if (pdata->use_hash) {
		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
			     (unsigned long)pdata);
		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);

		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
		     hash_i++) {
			struct ahash_alg *alg;

			alg = &algs_sha1_md5_sha256[hash_i];
			err = crypto_register_ahash(alg);
			if (err) {
				dev_err(dev, "can't register '%s': %d\n",
					alg->halg.base.cra_driver_name, err);
				goto err_hash;
			}
		}
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_hash:
	for (j = hash_i - 1; j >= 0; j--)
		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);

	tasklet_kill(&pdata->hash_tasklet);
	res->end -= 0x300;

err_algs:
	if (i < ARRAY_SIZE(algs))
		dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
			err);
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return err;
}

static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&pdata->tasklet);
	if (pdata->use_hash) {
		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);

		pdata->res->end -= 0x300;
		tasklet_kill(&pdata->hash_tasklet);
		pdata->use_hash = false;
	}

	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe	= s5p_aes_probe,
	.remove	= s5p_aes_remove,
	.driver	= {
		.name	= "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");