s5p-sss.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. //
  3. // Cryptographic API.
  4. //
  5. // Support for Samsung S5PV210 and Exynos HW acceleration.
  6. //
  7. // Copyright (C) 2011 NetUP Inc. All rights reserved.
  8. // Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
  9. //
  10. // Hash part based on omap-sham.c driver.
  11. #include <linux/clk.h>
  12. #include <linux/crypto.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/err.h>
  15. #include <linux/errno.h>
  16. #include <linux/init.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/io.h>
  19. #include <linux/kernel.h>
  20. #include <linux/module.h>
  21. #include <linux/of.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/scatterlist.h>
  24. #include <crypto/ctr.h>
  25. #include <crypto/aes.h>
  26. #include <crypto/algapi.h>
  27. #include <crypto/scatterwalk.h>
  28. #include <crypto/hash.h>
  29. #include <crypto/md5.h>
  30. #include <crypto/sha.h>
  31. #include <crypto/internal/hash.h>
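/*
 * _SBF(s, v) (defined below) places the field value v at bit position s, e.g.
 * _SBF(4, 0x02) == 0x20; it is used for multi-bit register fields.
 */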
  32. #define _SBF(s, v) ((v) << (s))
  33. /* Feed control registers */
  34. #define SSS_REG_FCINTSTAT 0x0000
  35. #define SSS_FCINTSTAT_HPARTINT BIT(7)
  36. #define SSS_FCINTSTAT_HDONEINT BIT(5)
  37. #define SSS_FCINTSTAT_BRDMAINT BIT(3)
  38. #define SSS_FCINTSTAT_BTDMAINT BIT(2)
  39. #define SSS_FCINTSTAT_HRDMAINT BIT(1)
  40. #define SSS_FCINTSTAT_PKDMAINT BIT(0)
  41. #define SSS_REG_FCINTENSET 0x0004
  42. #define SSS_FCINTENSET_HPARTINTENSET BIT(7)
  43. #define SSS_FCINTENSET_HDONEINTENSET BIT(5)
  44. #define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
  45. #define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
  46. #define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
  47. #define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
  48. #define SSS_REG_FCINTENCLR 0x0008
  49. #define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
  50. #define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
  51. #define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
  52. #define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
  53. #define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
  54. #define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
  55. #define SSS_REG_FCINTPEND 0x000C
  56. #define SSS_FCINTPEND_HPARTINTP BIT(7)
  57. #define SSS_FCINTPEND_HDONEINTP BIT(5)
  58. #define SSS_FCINTPEND_BRDMAINTP BIT(3)
  59. #define SSS_FCINTPEND_BTDMAINTP BIT(2)
  60. #define SSS_FCINTPEND_HRDMAINTP BIT(1)
  61. #define SSS_FCINTPEND_PKDMAINTP BIT(0)
  62. #define SSS_REG_FCFIFOSTAT 0x0010
  63. #define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
  64. #define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
  65. #define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
  66. #define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
  67. #define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
  68. #define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
  69. #define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
  70. #define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
  71. #define SSS_REG_FCFIFOCTRL 0x0014
  72. #define SSS_FCFIFOCTRL_DESSEL BIT(2)
  73. #define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
  74. #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
  75. #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
  76. #define SSS_HASHIN_MASK _SBF(0, 0x03)
  77. #define SSS_REG_FCBRDMAS 0x0020
  78. #define SSS_REG_FCBRDMAL 0x0024
  79. #define SSS_REG_FCBRDMAC 0x0028
  80. #define SSS_FCBRDMAC_BYTESWAP BIT(1)
  81. #define SSS_FCBRDMAC_FLUSH BIT(0)
  82. #define SSS_REG_FCBTDMAS 0x0030
  83. #define SSS_REG_FCBTDMAL 0x0034
  84. #define SSS_REG_FCBTDMAC 0x0038
  85. #define SSS_FCBTDMAC_BYTESWAP BIT(1)
  86. #define SSS_FCBTDMAC_FLUSH BIT(0)
  87. #define SSS_REG_FCHRDMAS 0x0040
  88. #define SSS_REG_FCHRDMAL 0x0044
  89. #define SSS_REG_FCHRDMAC 0x0048
  90. #define SSS_FCHRDMAC_BYTESWAP BIT(1)
  91. #define SSS_FCHRDMAC_FLUSH BIT(0)
  92. #define SSS_REG_FCPKDMAS 0x0050
  93. #define SSS_REG_FCPKDMAL 0x0054
  94. #define SSS_REG_FCPKDMAC 0x0058
  95. #define SSS_FCPKDMAC_BYTESWAP BIT(3)
  96. #define SSS_FCPKDMAC_DESCEND BIT(2)
  97. #define SSS_FCPKDMAC_TRANSMIT BIT(1)
  98. #define SSS_FCPKDMAC_FLUSH BIT(0)
  99. #define SSS_REG_FCPKDMAO 0x005C
  100. /* AES registers */
  101. #define SSS_REG_AES_CONTROL 0x00
  102. #define SSS_AES_BYTESWAP_DI BIT(11)
  103. #define SSS_AES_BYTESWAP_DO BIT(10)
  104. #define SSS_AES_BYTESWAP_IV BIT(9)
  105. #define SSS_AES_BYTESWAP_CNT BIT(8)
  106. #define SSS_AES_BYTESWAP_KEY BIT(7)
  107. #define SSS_AES_KEY_CHANGE_MODE BIT(6)
  108. #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
  109. #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
  110. #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
  111. #define SSS_AES_FIFO_MODE BIT(3)
  112. #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
  113. #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
  114. #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
  115. #define SSS_AES_MODE_DECRYPT BIT(0)
  116. #define SSS_REG_AES_STATUS 0x04
  117. #define SSS_AES_BUSY BIT(2)
  118. #define SSS_AES_INPUT_READY BIT(1)
  119. #define SSS_AES_OUTPUT_READY BIT(0)
  120. #define SSS_REG_AES_IN_DATA(s) (0x10 + ((s) << 2))
  121. #define SSS_REG_AES_OUT_DATA(s) (0x20 + ((s) << 2))
  122. #define SSS_REG_AES_IV_DATA(s) (0x30 + ((s) << 2))
  123. #define SSS_REG_AES_CNT_DATA(s) (0x40 + ((s) << 2))
  124. #define SSS_REG_AES_KEY_DATA(s) (0x80 + ((s) << 2))
  125. #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
  126. #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
  127. #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
  128. #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
  129. #define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
  130. SSS_AES_REG(dev, reg))
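/*
 * Illustrative expansion of the accessors above: SSS_WRITE(dev, FCINTPEND, v)
 * becomes __raw_writel(v, dev->ioaddr + SSS_REG_FCINTPEND), while
 * SSS_AES_WRITE() addresses registers relative to dev->aes_ioaddr.
 */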
  131. /* HW engine modes */
  132. #define FLAGS_AES_DECRYPT BIT(0)
  133. #define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
  134. #define FLAGS_AES_CBC _SBF(1, 0x01)
  135. #define FLAGS_AES_CTR _SBF(1, 0x02)
  136. #define AES_KEY_LEN 16
  137. #define CRYPTO_QUEUE_LEN 1
  138. /* HASH registers */
  139. #define SSS_REG_HASH_CTRL 0x00
  140. #define SSS_HASH_USER_IV_EN BIT(5)
  141. #define SSS_HASH_INIT_BIT BIT(4)
  142. #define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
  143. #define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
  144. #define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
  145. #define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)
  146. #define SSS_REG_HASH_CTRL_PAUSE 0x04
  147. #define SSS_HASH_PAUSE BIT(0)
  148. #define SSS_REG_HASH_CTRL_FIFO 0x08
  149. #define SSS_HASH_FIFO_MODE_DMA BIT(0)
  150. #define SSS_HASH_FIFO_MODE_CPU 0
  151. #define SSS_REG_HASH_CTRL_SWAP 0x0C
  152. #define SSS_HASH_BYTESWAP_DI BIT(3)
  153. #define SSS_HASH_BYTESWAP_DO BIT(2)
  154. #define SSS_HASH_BYTESWAP_IV BIT(1)
  155. #define SSS_HASH_BYTESWAP_KEY BIT(0)
  156. #define SSS_REG_HASH_STATUS 0x10
  157. #define SSS_HASH_STATUS_MSG_DONE BIT(6)
  158. #define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
  159. #define SSS_HASH_STATUS_BUFFER_READY BIT(0)
  160. #define SSS_REG_HASH_MSG_SIZE_LOW 0x20
  161. #define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
  162. #define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
  163. #define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
  164. #define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
  165. #define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))
  166. #define HASH_BLOCK_SIZE 64
  167. #define HASH_REG_SIZEOF 4
  168. #define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
  169. #define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
  170. #define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
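/*
 * With 4-byte registers this yields 4 output registers for MD5 (16-byte
 * digest), 5 for SHA-1 (20 bytes) and 8 for SHA-256 (32 bytes).
 */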
  171. /*
  172. * HASH bit numbers, used by the device; set in dev->hash_flags with
  173. * set_bit()/clear_bit() and tested with test_bit() or BIT(), to keep the
  174. * HASH state BUSY or FREE, or to signal state from the irq_handler to the
  175. * hash_tasklet. The SGS bits keep track of memory allocated for the scatterlist.
  176. */
  177. #define HASH_FLAGS_BUSY 0
  178. #define HASH_FLAGS_FINAL 1
  179. #define HASH_FLAGS_DMA_ACTIVE 2
  180. #define HASH_FLAGS_OUTPUT_READY 3
  181. #define HASH_FLAGS_DMA_READY 4
  182. #define HASH_FLAGS_SGS_COPIED 5
  183. #define HASH_FLAGS_SGS_ALLOCED 6
  184. /* HASH HW constants */
  185. #define BUFLEN HASH_BLOCK_SIZE
  186. #define SSS_HASH_DMA_LEN_ALIGN 8
  187. #define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
  188. #define SSS_HASH_QUEUE_LENGTH 10
  189. /**
  190. * struct samsung_aes_variant - platform specific SSS driver data
  191. * @aes_offset: AES register offset from SSS module's base.
  192. * @hash_offset: HASH register offset from SSS module's base.
  193. *
  194. * Specifies the platform-specific configuration of the SSS module.
  195. * Note: a structure is used for the driver-specific platform data to allow
  196. * future expansion of its usage.
  197. */
  198. struct samsung_aes_variant {
  199. unsigned int aes_offset;
  200. unsigned int hash_offset;
  201. };
  202. struct s5p_aes_reqctx {
  203. unsigned long mode;
  204. };
  205. struct s5p_aes_ctx {
  206. struct s5p_aes_dev *dev;
  207. u8 aes_key[AES_MAX_KEY_SIZE];
  208. u8 nonce[CTR_RFC3686_NONCE_SIZE];
  209. int keylen;
  210. };
  211. /**
  212. * struct s5p_aes_dev - Crypto device state container
  213. * @dev: Associated device
  214. * @clk: Clock for accessing hardware
  215. * @ioaddr: Mapped IO memory region
  216. * @aes_ioaddr: Per-variant offset for AES block IO memory
  217. * @irq_fc: Feed control interrupt line
  218. * @req: Crypto request currently handled by the device
  219. * @ctx: Configuration for currently handled crypto request
  220. * @sg_src: Scatter list with source data for currently handled block
  221. * in device. This is DMA-mapped into device.
  222. * @sg_dst: Scatter list with destination data for currently handled block
  223. * in device. This is DMA-mapped into device.
  224. * @sg_src_cpy: In case of unaligned access, copied scatter list
  225. * with source data.
  226. * @sg_dst_cpy: In case of unaligned access, copied scatter list
  227. * with destination data.
  228. * @tasklet: New request scheduling job
  229. * @queue: Crypto queue
  230. * @busy: Indicates whether the device is currently handling some request
  231. * thus it uses some of the fields from this state, like:
  232. * req, ctx, sg_src/dst (and copies). This essentially
  233. * protects against concurrent access to these fields.
  234. * @lock: Lock for protecting both access to device hardware registers
  235. * and fields related to current request (including the busy field).
  236. * @res: Resources for hash.
  237. * @io_hash_base: Per-variant offset for HASH block IO memory.
  238. * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
  239. * variable.
  240. * @hash_flags: Flags for current HASH op.
  241. * @hash_queue: Async hash queue.
  242. * @hash_tasklet: New HASH request scheduling job.
  243. * @xmit_buf: Buffer for current HASH request transfer into SSS block.
  244. * @hash_req: Current request sending to SSS HASH block.
  245. * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
  246. * @hash_sg_cnt: Counter for hash_sg_iter.
  247. *
  248. * @use_hash: true if HASH algs enabled
  249. */
  250. struct s5p_aes_dev {
  251. struct device *dev;
  252. struct clk *clk;
  253. void __iomem *ioaddr;
  254. void __iomem *aes_ioaddr;
  255. int irq_fc;
  256. struct ablkcipher_request *req;
  257. struct s5p_aes_ctx *ctx;
  258. struct scatterlist *sg_src;
  259. struct scatterlist *sg_dst;
  260. struct scatterlist *sg_src_cpy;
  261. struct scatterlist *sg_dst_cpy;
  262. struct tasklet_struct tasklet;
  263. struct crypto_queue queue;
  264. bool busy;
  265. spinlock_t lock;
  266. struct resource *res;
  267. void __iomem *io_hash_base;
  268. spinlock_t hash_lock; /* protect hash_ vars */
  269. unsigned long hash_flags;
  270. struct crypto_queue hash_queue;
  271. struct tasklet_struct hash_tasklet;
  272. u8 xmit_buf[BUFLEN];
  273. struct ahash_request *hash_req;
  274. struct scatterlist *hash_sg_iter;
  275. unsigned int hash_sg_cnt;
  276. bool use_hash;
  277. };
  278. /**
  279. * struct s5p_hash_reqctx - HASH request context
  280. * @dd: Associated device
  281. * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
  282. * @digcnt: Number of bytes processed by HW (without buffer[] ones)
  283. * @digest: Digest message or IV for partial result
  284. * @nregs: Number of HW registers for digest or IV read/write
  285. * @engine: Bits for selecting type of HASH in SSS block
  286. * @sg: sg for DMA transfer
  287. * @sg_len: Length of sg for DMA transfer
  288. * @sgl[]: sg for joining buffer and req->src scatterlist
  289. * @skip: Skip offset in req->src for current op
  290. * @total: Total number of bytes for current request
  291. * @finup: Keep state for finup or final.
  292. * @error: Keep track of error.
  293. * @bufcnt: Number of bytes held in buffer[]
  294. * @buffer[]: For byte(s) from end of req->src in UPDATE op
  295. */
  296. struct s5p_hash_reqctx {
  297. struct s5p_aes_dev *dd;
  298. bool op_update;
  299. u64 digcnt;
  300. u8 digest[SHA256_DIGEST_SIZE];
  301. unsigned int nregs; /* digest_size / sizeof(reg) */
  302. u32 engine;
  303. struct scatterlist *sg;
  304. unsigned int sg_len;
  305. struct scatterlist sgl[2];
  306. unsigned int skip;
  307. unsigned int total;
  308. bool finup;
  309. bool error;
  310. u32 bufcnt;
  311. u8 buffer[];
  312. };
  313. /**
  314. * struct s5p_hash_ctx - HASH transformation context
  315. * @dd: Associated device
  316. * @flags: Bits for algorithm HASH.
  317. * @fallback: Software transformation for zero message or size < BUFLEN.
  318. */
  319. struct s5p_hash_ctx {
  320. struct s5p_aes_dev *dd;
  321. unsigned long flags;
  322. struct crypto_shash *fallback;
  323. };
  324. static const struct samsung_aes_variant s5p_aes_data = {
  325. .aes_offset = 0x4000,
  326. .hash_offset = 0x6000,
  327. };
  328. static const struct samsung_aes_variant exynos_aes_data = {
  329. .aes_offset = 0x200,
  330. .hash_offset = 0x400,
  331. };
  332. static const struct of_device_id s5p_sss_dt_match[] = {
  333. {
  334. .compatible = "samsung,s5pv210-secss",
  335. .data = &s5p_aes_data,
  336. },
  337. {
  338. .compatible = "samsung,exynos4210-secss",
  339. .data = &exynos_aes_data,
  340. },
  341. { },
  342. };
  343. MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
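/*
 * The two variants above differ only in where the AES and HASH register banks
 * sit relative to the SSS base: 0x4000/0x6000 on S5PV210 versus 0x200/0x400
 * on Exynos4210-class SoCs.
 */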
  344. static inline const struct samsung_aes_variant *find_s5p_sss_version
  345. (const struct platform_device *pdev)
  346. {
  347. if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
  348. const struct of_device_id *match;
  349. match = of_match_node(s5p_sss_dt_match,
  350. pdev->dev.of_node);
  351. return (const struct samsung_aes_variant *)match->data;
  352. }
  353. return (const struct samsung_aes_variant *)
  354. platform_get_device_id(pdev)->driver_data;
  355. }
  356. static struct s5p_aes_dev *s5p_dev;
  357. static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
  358. const struct scatterlist *sg)
  359. {
  360. SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
  361. SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
  362. }
  363. static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
  364. const struct scatterlist *sg)
  365. {
  366. SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
  367. SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
  368. }
  369. static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
  370. {
  371. int len;
  372. if (!*sg)
  373. return;
  374. len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
  375. free_pages((unsigned long)sg_virt(*sg), get_order(len));
  376. kfree(*sg);
  377. *sg = NULL;
  378. }
  379. static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
  380. unsigned int nbytes, int out)
  381. {
  382. struct scatter_walk walk;
  383. if (!nbytes)
  384. return;
  385. scatterwalk_start(&walk, sg);
  386. scatterwalk_copychunks(buf, &walk, nbytes, out);
  387. scatterwalk_done(&walk, out, 0);
  388. }
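/*
 * Note on s5p_sg_copy_buf(): with out == 0 data is copied from the scatterlist
 * into buf, with out == 1 from buf back into the scatterlist (standard
 * scatterwalk_copychunks() direction), so one helper serves both the input and
 * the output copy paths.
 */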
  389. static void s5p_sg_done(struct s5p_aes_dev *dev)
  390. {
  391. if (dev->sg_dst_cpy) {
  392. dev_dbg(dev->dev,
  393. "Copying %d bytes of output data back to original place\n",
  394. dev->req->nbytes);
  395. s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
  396. dev->req->nbytes, 1);
  397. }
  398. s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
  399. s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
  400. }
  401. /* Calls the completion. Cannot be called with dev->lock held. */
  402. static void s5p_aes_complete(struct ablkcipher_request *req, int err)
  403. {
  404. req->base.complete(&req->base, err);
  405. }
  406. static void s5p_unset_outdata(struct s5p_aes_dev *dev)
  407. {
  408. dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
  409. }
  410. static void s5p_unset_indata(struct s5p_aes_dev *dev)
  411. {
  412. dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
  413. }
  414. static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
  415. struct scatterlist **dst)
  416. {
  417. void *pages;
  418. int len;
  419. *dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
  420. if (!*dst)
  421. return -ENOMEM;
  422. len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
  423. pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
  424. if (!pages) {
  425. kfree(*dst);
  426. *dst = NULL;
  427. return -ENOMEM;
  428. }
  429. s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
  430. sg_init_table(*dst, 1);
  431. sg_set_buf(*dst, pages, len);
  432. return 0;
  433. }
  434. static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
  435. {
  436. if (!sg->length)
  437. return -EINVAL;
  438. if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
  439. return -ENOMEM;
  440. dev->sg_dst = sg;
  441. return 0;
  442. }
  443. static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
  444. {
  445. if (!sg->length)
  446. return -EINVAL;
  447. if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
  448. return -ENOMEM;
  449. dev->sg_src = sg;
  450. return 0;
  451. }
  452. /*
  453. * Returns -ERRNO on error (mapping of new data failed).
  454. * On success returns:
  455. * - 0 if there is no more data,
  456. * - 1 if new transmitting (output) data is ready and its address+length
  457. * have to be written to device (by calling s5p_set_dma_outdata()).
  458. */
  459. static int s5p_aes_tx(struct s5p_aes_dev *dev)
  460. {
  461. int ret = 0;
  462. s5p_unset_outdata(dev);
  463. if (!sg_is_last(dev->sg_dst)) {
  464. ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
  465. if (!ret)
  466. ret = 1;
  467. }
  468. return ret;
  469. }
  470. /*
  471. * Returns -ERRNO on error (mapping of new data failed).
  472. * On success returns:
  473. * - 0 if there is no more data,
  474. * - 1 if new receiving (input) data is ready and its address+length
  475. * have to be written to device (by calling s5p_set_dma_indata()).
  476. */
  477. static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
  478. {
  479. int ret = 0;
  480. s5p_unset_indata(dev);
  481. if (!sg_is_last(dev->sg_src)) {
  482. ret = s5p_set_indata(dev, sg_next(dev->sg_src));
  483. if (!ret)
  484. ret = 1;
  485. }
  486. return ret;
  487. }
  488. static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
  489. {
  490. return __raw_readl(dd->io_hash_base + offset);
  491. }
  492. static inline void s5p_hash_write(struct s5p_aes_dev *dd,
  493. u32 offset, u32 value)
  494. {
  495. __raw_writel(value, dd->io_hash_base + offset);
  496. }
  497. /**
  498. * s5p_set_dma_hashdata() - start DMA with sg
  499. * @dev: device
  500. * @sg: scatterlist ready to DMA transmit
  501. */
  502. static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
  503. const struct scatterlist *sg)
  504. {
  505. dev->hash_sg_cnt--;
  506. SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
  507. SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
  508. }
  509. /**
  510. * s5p_hash_rx() - get next hash_sg_iter
  511. * @dev: device
  512. *
  513. * Return:
  514. * 2 if there is no more data and it is UPDATE op
  515. * 1 if new receiving (input) data is ready and can be written to device
  516. * 0 if there is no more data and it is FINAL op
  517. */
  518. static int s5p_hash_rx(struct s5p_aes_dev *dev)
  519. {
  520. if (dev->hash_sg_cnt > 0) {
  521. dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
  522. return 1;
  523. }
  524. set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
  525. if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
  526. return 0;
  527. return 2;
  528. }
  529. static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
  530. {
  531. struct platform_device *pdev = dev_id;
  532. struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
  533. struct ablkcipher_request *req;
  534. int err_dma_tx = 0;
  535. int err_dma_rx = 0;
  536. int err_dma_hx = 0;
  537. bool tx_end = false;
  538. bool hx_end = false;
  539. unsigned long flags;
  540. u32 status, st_bits;
  541. int err;
  542. spin_lock_irqsave(&dev->lock, flags);
  543. /*
  544. * Handle an rx or tx interrupt. If there is still data (the scatterlist has
  545. * not reached its end), map the next scatterlist entry.
  546. * If such a mapping fails, s5p_aes_complete() should be called.
  547. *
  548. * If there is no more data in tx scatter list, call s5p_aes_complete()
  549. * and schedule new tasklet.
  550. *
  551. * Handle hx interrupt. If there is still data map next entry.
  552. */
  553. status = SSS_READ(dev, FCINTSTAT);
  554. if (status & SSS_FCINTSTAT_BRDMAINT)
  555. err_dma_rx = s5p_aes_rx(dev);
  556. if (status & SSS_FCINTSTAT_BTDMAINT) {
  557. if (sg_is_last(dev->sg_dst))
  558. tx_end = true;
  559. err_dma_tx = s5p_aes_tx(dev);
  560. }
  561. if (status & SSS_FCINTSTAT_HRDMAINT)
  562. err_dma_hx = s5p_hash_rx(dev);
  563. st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
  564. SSS_FCINTSTAT_HRDMAINT);
  565. /* clear DMA bits */
  566. SSS_WRITE(dev, FCINTPEND, st_bits);
  567. /* clear HASH irq bits */
  568. if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
  569. /* cannot have both HPART and HDONE */
  570. if (status & SSS_FCINTSTAT_HPARTINT)
  571. st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
  572. if (status & SSS_FCINTSTAT_HDONEINT)
  573. st_bits = SSS_HASH_STATUS_MSG_DONE;
  574. set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
  575. s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
  576. hx_end = true;
  577. /* when DONE or PART, do not handle HASH DMA */
  578. err_dma_hx = 0;
  579. }
  580. if (err_dma_rx < 0) {
  581. err = err_dma_rx;
  582. goto error;
  583. }
  584. if (err_dma_tx < 0) {
  585. err = err_dma_tx;
  586. goto error;
  587. }
  588. if (tx_end) {
  589. s5p_sg_done(dev);
  590. if (err_dma_hx == 1)
  591. s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
  592. spin_unlock_irqrestore(&dev->lock, flags);
  593. s5p_aes_complete(dev->req, 0);
  594. /* Device is still busy */
  595. tasklet_schedule(&dev->tasklet);
  596. } else {
  597. /*
  598. * Writing length of DMA block (either receiving or
  599. * transmitting) will start the operation immediately, so this
  600. * should be done at the end (even after clearing pending
  601. * interrupts to not miss the interrupt).
  602. */
  603. if (err_dma_tx == 1)
  604. s5p_set_dma_outdata(dev, dev->sg_dst);
  605. if (err_dma_rx == 1)
  606. s5p_set_dma_indata(dev, dev->sg_src);
  607. if (err_dma_hx == 1)
  608. s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
  609. spin_unlock_irqrestore(&dev->lock, flags);
  610. }
  611. goto hash_irq_end;
  612. error:
  613. s5p_sg_done(dev);
  614. dev->busy = false;
  615. req = dev->req;
  616. if (err_dma_hx == 1)
  617. s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
  618. spin_unlock_irqrestore(&dev->lock, flags);
  619. s5p_aes_complete(req, err);
  620. hash_irq_end:
  621. /*
  622. * Note about else if:
  623. * when hash_sg_iter reaches its end and it is an UPDATE op,
  624. * issue SSS_HASH_PAUSE and wait for the HPART irq
  625. */
  626. if (hx_end)
  627. tasklet_schedule(&dev->hash_tasklet);
  628. else if (err_dma_hx == 2)
  629. s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
  630. SSS_HASH_PAUSE);
  631. return IRQ_HANDLED;
  632. }
  633. /**
  634. * s5p_hash_read_msg() - read message or IV from HW
  635. * @req: AHASH request
  636. */
  637. static void s5p_hash_read_msg(struct ahash_request *req)
  638. {
  639. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  640. struct s5p_aes_dev *dd = ctx->dd;
  641. u32 *hash = (u32 *)ctx->digest;
  642. unsigned int i;
  643. for (i = 0; i < ctx->nregs; i++)
  644. hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
  645. }
  646. /**
  647. * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
  648. * @dd: device
  649. * @ctx: request context
  650. */
  651. static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
  652. const struct s5p_hash_reqctx *ctx)
  653. {
  654. const u32 *hash = (const u32 *)ctx->digest;
  655. unsigned int i;
  656. for (i = 0; i < ctx->nregs; i++)
  657. s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
  658. }
  659. /**
  660. * s5p_hash_write_iv() - write IV for next partial/finup op.
  661. * @req: AHASH request
  662. */
  663. static void s5p_hash_write_iv(struct ahash_request *req)
  664. {
  665. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  666. s5p_hash_write_ctx_iv(ctx->dd, ctx);
  667. }
  668. /**
  669. * s5p_hash_copy_result() - copy digest into req->result
  670. * @req: AHASH request
  671. */
  672. static void s5p_hash_copy_result(struct ahash_request *req)
  673. {
  674. const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  675. if (!req->result)
  676. return;
  677. memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
  678. }
  679. /**
  680. * s5p_hash_dma_flush() - flush HASH DMA
  681. * @dev: secss device
  682. */
  683. static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
  684. {
  685. SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
  686. }
  687. /**
  688. * s5p_hash_dma_enable() - enable DMA mode for HASH
  689. * @dev: secss device
  690. *
  691. * enable DMA mode for HASH
  692. */
  693. static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
  694. {
  695. s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
  696. }
  697. /**
  698. * s5p_hash_irq_disable() - disable irq HASH signals
  699. * @dev: secss device
  700. * @flags: bitfield with irq's to be disabled
  701. */
  702. static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
  703. {
  704. SSS_WRITE(dev, FCINTENCLR, flags);
  705. }
  706. /**
  707. * s5p_hash_irq_enable() - enable irq signals
  708. * @dev: secss device
  709. * @flags: bitfield with irq's to be enabled
  710. */
  711. static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
  712. {
  713. SSS_WRITE(dev, FCINTENSET, flags);
  714. }
  715. /**
  716. * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
  717. * @dev: secss device
  718. * @hashflow: HASH stream flow with/without crypto AES/DES
  719. */
  720. static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
  721. {
  722. unsigned long flags;
  723. u32 flow;
  724. spin_lock_irqsave(&dev->lock, flags);
  725. flow = SSS_READ(dev, FCFIFOCTRL);
  726. flow &= ~SSS_HASHIN_MASK;
  727. flow |= hashflow;
  728. SSS_WRITE(dev, FCFIFOCTRL, flow);
  729. spin_unlock_irqrestore(&dev->lock, flags);
  730. }
  731. /**
  732. * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
  733. * @dev: secss device
  734. * @hashflow: HASH stream flow with/without AES/DES
  735. *
  736. * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
  737. * enable HASH irq's HRDMA, HDONE, HPART
  738. */
  739. static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
  740. {
  741. s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
  742. SSS_FCINTENCLR_HDONEINTENCLR |
  743. SSS_FCINTENCLR_HPARTINTENCLR);
  744. s5p_hash_dma_flush(dev);
  745. s5p_hash_dma_enable(dev);
  746. s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
  747. s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
  748. SSS_FCINTENSET_HDONEINTENSET |
  749. SSS_FCINTENSET_HPARTINTENSET);
  750. }
  751. /**
  752. * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
  753. * @dd: secss device
  754. * @length: length for request
  755. * @final: true if final op
  756. *
  757. * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
  758. * after previous updates, fill up IV words. For final, calculate and set
  759. * lengths for HASH so SecSS can finalize the hash. For a partial op, set the
  760. * SSS HASH length to 2^63 bits so it will never be reached, and set prelow
  761. * and prehigh to zero.
  762. *
  763. * This function does not start DMA transfer.
  764. */
  765. static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
  766. bool final)
  767. {
  768. struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
  769. u32 prelow, prehigh, low, high;
  770. u32 configflags, swapflags;
  771. u64 tmplen;
  772. configflags = ctx->engine | SSS_HASH_INIT_BIT;
  773. if (likely(ctx->digcnt)) {
  774. s5p_hash_write_ctx_iv(dd, ctx);
  775. configflags |= SSS_HASH_USER_IV_EN;
  776. }
  777. if (final) {
  778. /* number of bytes for last part */
  779. low = length;
  780. high = 0;
  781. /* total number of bits prev hashed */
  782. tmplen = ctx->digcnt * 8;
  783. prelow = (u32)tmplen;
  784. prehigh = (u32)(tmplen >> 32);
  785. } else {
  786. prelow = 0;
  787. prehigh = 0;
  788. low = 0;
  789. high = BIT(31);
  790. }
  791. swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
  792. SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
  793. s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
  794. s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
  795. s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
  796. s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
  797. s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
  798. s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
  799. }
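/*
 * Worked example (illustrative numbers): finalizing an 80-byte tail after 128
 * bytes have already been hashed programs MSG_SIZE_LOW = 80, MSG_SIZE_HIGH = 0
 * and PRE_MSG_SIZE_LOW = 128 * 8 = 1024 bits. A non-final chunk instead sets
 * MSG_SIZE_HIGH = BIT(31), i.e. a 2^63-bit total that is never reached, so the
 * engine keeps expecting more data.
 */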
  800. /**
  801. * s5p_hash_xmit_dma() - start DMA hash processing
  802. * @dd: secss device
  803. * @length: length for request
  804. * @final: true if final op
  805. *
  806. * Update digcnt here, as it is needed for finup/final op.
  807. */
  808. static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
  809. bool final)
  810. {
  811. struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
  812. unsigned int cnt;
  813. cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
  814. if (!cnt) {
  815. dev_err(dd->dev, "dma_map_sg error\n");
  816. ctx->error = true;
  817. return -EINVAL;
  818. }
  819. set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
  820. dd->hash_sg_iter = ctx->sg;
  821. dd->hash_sg_cnt = cnt;
  822. s5p_hash_write_ctrl(dd, length, final);
  823. ctx->digcnt += length;
  824. ctx->total -= length;
  825. /* catch last interrupt */
  826. if (final)
  827. set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
  828. s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
  829. return -EINPROGRESS;
  830. }
  831. /**
  832. * s5p_hash_copy_sgs() - copy request's bytes into new buffer
  833. * @ctx: request context
  834. * @sg: source scatterlist request
  835. * @new_len: number of bytes to process from sg
  836. *
  837. * Allocate new buffer, copy data for HASH into it. If there was xmit_buf
  838. * filled, copy it first, then copy data from sg into it. Prepare one sgl[0]
  839. * with allocated buffer.
  840. *
  841. * Set bit in dd->hash_flags so we can free it after irq ends processing.
  842. */
  843. static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
  844. struct scatterlist *sg, unsigned int new_len)
  845. {
  846. unsigned int pages, len;
  847. void *buf;
  848. len = new_len + ctx->bufcnt;
  849. pages = get_order(len);
  850. buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
  851. if (!buf) {
  852. dev_err(ctx->dd->dev, "alloc pages for unaligned case failed\n");
  853. ctx->error = true;
  854. return -ENOMEM;
  855. }
  856. if (ctx->bufcnt)
  857. memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
  858. scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
  859. new_len, 0);
  860. sg_init_table(ctx->sgl, 1);
  861. sg_set_buf(ctx->sgl, buf, len);
  862. ctx->sg = ctx->sgl;
  863. ctx->sg_len = 1;
  864. ctx->bufcnt = 0;
  865. ctx->skip = 0;
  866. set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
  867. return 0;
  868. }
  869. /**
  870. * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
  871. * @ctx: request context
  872. * @sg: source scatterlist request
  873. * @new_len: number of bytes to process from sg
  874. *
  875. * Allocate new scatterlist table, copy data for HASH into it. If there was
  876. * xmit_buf filled, prepare it first, then copy page, length and offset from
  877. * source sg into it, adjusting begin and/or end for skip offset and
  878. * hash_later value.
  879. *
  880. * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
  881. * it after irq ends processing.
  882. */
  883. static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
  884. struct scatterlist *sg, unsigned int new_len)
  885. {
  886. unsigned int skip = ctx->skip, n = sg_nents(sg);
  887. struct scatterlist *tmp;
  888. unsigned int len;
  889. if (ctx->bufcnt)
  890. n++;
  891. ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
  892. if (!ctx->sg) {
  893. ctx->error = true;
  894. return -ENOMEM;
  895. }
  896. sg_init_table(ctx->sg, n);
  897. tmp = ctx->sg;
  898. ctx->sg_len = 0;
  899. if (ctx->bufcnt) {
  900. sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
  901. tmp = sg_next(tmp);
  902. ctx->sg_len++;
  903. }
  904. while (sg && skip >= sg->length) {
  905. skip -= sg->length;
  906. sg = sg_next(sg);
  907. }
  908. while (sg && new_len) {
  909. len = sg->length - skip;
  910. if (new_len < len)
  911. len = new_len;
  912. new_len -= len;
  913. sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
  914. skip = 0;
  915. if (new_len <= 0)
  916. sg_mark_end(tmp);
  917. tmp = sg_next(tmp);
  918. ctx->sg_len++;
  919. sg = sg_next(sg);
  920. }
  921. set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
  922. return 0;
  923. }
  924. /**
  925. * s5p_hash_prepare_sgs() - prepare sg for processing
  926. * @ctx: request context
  927. * @sg: source scatterlist request
  928. * @nbytes: number of bytes to process from sg
  929. * @final: final flag
  930. *
  931. * Check two conditions: (1) the buffers in sg hold length-aligned data, and
  932. * (2) the sg table has well-aligned elements (list_ok). If one of these checks
  933. * fails, then either (1) allocate a new buffer with s5p_hash_copy_sgs(), copy
  934. * the data into it and prepare the request in sgl, or (2) allocate a new sg
  935. * table and prepare its sg elements.
  936. *
  937. * For digest or finup all conditions can be good, and we may not need any
  938. * fixes.
  939. */
  940. static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
  941. struct scatterlist *sg,
  942. unsigned int new_len, bool final)
  943. {
  944. unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
  945. bool aligned = true, list_ok = true;
  946. struct scatterlist *sg_tmp = sg;
  947. if (!sg || !sg->length || !new_len)
  948. return 0;
  949. if (skip || !final)
  950. list_ok = false;
  951. while (nbytes > 0 && sg_tmp) {
  952. n++;
  953. if (skip >= sg_tmp->length) {
  954. skip -= sg_tmp->length;
  955. if (!sg_tmp->length) {
  956. aligned = false;
  957. break;
  958. }
  959. } else {
  960. if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
  961. aligned = false;
  962. break;
  963. }
  964. if (nbytes < sg_tmp->length - skip) {
  965. list_ok = false;
  966. break;
  967. }
  968. nbytes -= sg_tmp->length - skip;
  969. skip = 0;
  970. }
  971. sg_tmp = sg_next(sg_tmp);
  972. }
  973. if (!aligned)
  974. return s5p_hash_copy_sgs(ctx, sg, new_len);
  975. else if (!list_ok)
  976. return s5p_hash_copy_sg_lists(ctx, sg, new_len);
  977. /*
  978. * Have aligned data from previous operation and/or current
  979. * Note: will enter here only if (digest or finup) and aligned
  980. */
  981. if (ctx->bufcnt) {
  982. ctx->sg_len = n;
  983. sg_init_table(ctx->sgl, 2);
  984. sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
  985. sg_chain(ctx->sgl, 2, sg);
  986. ctx->sg = ctx->sgl;
  987. ctx->sg_len++;
  988. } else {
  989. ctx->sg = sg;
  990. ctx->sg_len = n;
  991. }
  992. return 0;
  993. }
  994. /**
  995. * s5p_hash_prepare_request() - prepare request for processing
  996. * @req: AHASH request
  997. * @update: true if UPDATE op
  998. *
  999. * Note 1: we can have update flag _and_ final flag at the same time.
  1000. * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE) or
  1001. * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or
  1002. * we have final op
  1003. */
  1004. static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
  1005. {
  1006. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1007. bool final = ctx->finup;
  1008. int xmit_len, hash_later, nbytes;
  1009. int ret;
  1010. if (update)
  1011. nbytes = req->nbytes;
  1012. else
  1013. nbytes = 0;
  1014. ctx->total = nbytes + ctx->bufcnt;
  1015. if (!ctx->total)
  1016. return 0;
  1017. if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
  1018. /* bytes left from previous request, so fill up to BUFLEN */
  1019. int len = BUFLEN - ctx->bufcnt % BUFLEN;
  1020. if (len > nbytes)
  1021. len = nbytes;
  1022. scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
  1023. 0, len, 0);
  1024. ctx->bufcnt += len;
  1025. nbytes -= len;
  1026. ctx->skip = len;
  1027. } else {
  1028. ctx->skip = 0;
  1029. }
  1030. if (ctx->bufcnt)
  1031. memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
  1032. xmit_len = ctx->total;
  1033. if (final) {
  1034. hash_later = 0;
  1035. } else {
  1036. if (IS_ALIGNED(xmit_len, BUFLEN))
  1037. xmit_len -= BUFLEN;
  1038. else
  1039. xmit_len -= xmit_len & (BUFLEN - 1);
  1040. hash_later = ctx->total - xmit_len;
  1041. /* copy hash_later bytes from end of req->src */
  1042. /* previous bytes are in xmit_buf, so no overwrite */
  1043. scatterwalk_map_and_copy(ctx->buffer, req->src,
  1044. req->nbytes - hash_later,
  1045. hash_later, 0);
  1046. }
  1047. if (xmit_len > BUFLEN) {
  1048. ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
  1049. final);
  1050. if (ret)
  1051. return ret;
  1052. } else {
  1053. /* have buffered data only */
  1054. if (unlikely(!ctx->bufcnt)) {
  1055. /* first update didn't fill up buffer */
  1056. scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
  1057. 0, xmit_len, 0);
  1058. }
  1059. sg_init_table(ctx->sgl, 1);
  1060. sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
  1061. ctx->sg = ctx->sgl;
  1062. ctx->sg_len = 1;
  1063. }
  1064. ctx->bufcnt = hash_later;
  1065. if (!final)
  1066. ctx->total = xmit_len;
  1067. return 0;
  1068. }
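/*
 * Illustrative walk-through (assumed numbers): an update with ctx->bufcnt = 16
 * and req->nbytes = 200 gives total = 216. The first 48 bytes of req->src top
 * up buffer[] to BUFLEN (skip = 48), xmit_len is rounded down to 192,
 * hash_later = 24, and the last 24 bytes of req->src are kept in buffer[] for
 * the next call; s5p_hash_prepare_sgs() is then asked to chain xmit_buf
 * (64 bytes) with the remaining 128 bytes of req->src.
 */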
  1069. /**
  1070. * s5p_hash_update_dma_stop() - unmap DMA
  1071. * @dd: secss device
  1072. *
  1073. * Unmap scatterlist ctx->sg.
  1074. */
  1075. static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
  1076. {
  1077. const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
  1078. dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
  1079. clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
  1080. }
  1081. /**
  1082. * s5p_hash_finish() - copy calculated digest to crypto layer
  1083. * @req: AHASH request
  1084. */
  1085. static void s5p_hash_finish(struct ahash_request *req)
  1086. {
  1087. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1088. struct s5p_aes_dev *dd = ctx->dd;
  1089. if (ctx->digcnt)
  1090. s5p_hash_copy_result(req);
  1091. dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
  1092. }
  1093. /**
  1094. * s5p_hash_finish_req() - finish request
  1095. * @req: AHASH request
  1096. * @err: error
  1097. */
  1098. static void s5p_hash_finish_req(struct ahash_request *req, int err)
  1099. {
  1100. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1101. struct s5p_aes_dev *dd = ctx->dd;
  1102. unsigned long flags;
  1103. if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
  1104. free_pages((unsigned long)sg_virt(ctx->sg),
  1105. get_order(ctx->sg->length));
  1106. if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
  1107. kfree(ctx->sg);
  1108. ctx->sg = NULL;
  1109. dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
  1110. BIT(HASH_FLAGS_SGS_COPIED));
  1111. if (!err && !ctx->error) {
  1112. s5p_hash_read_msg(req);
  1113. if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
  1114. s5p_hash_finish(req);
  1115. } else {
  1116. ctx->error = true;
  1117. }
  1118. spin_lock_irqsave(&dd->hash_lock, flags);
  1119. dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
  1120. BIT(HASH_FLAGS_DMA_READY) |
  1121. BIT(HASH_FLAGS_OUTPUT_READY));
  1122. spin_unlock_irqrestore(&dd->hash_lock, flags);
  1123. if (req->base.complete)
  1124. req->base.complete(&req->base, err);
  1125. }
  1126. /**
  1127. * s5p_hash_handle_queue() - handle hash queue
  1128. * @dd: device s5p_aes_dev
  1129. * @req: AHASH request
  1130. *
  1131. * If req != NULL, enqueue it on dd->hash_queue. If HASH_FLAGS_BUSY is not set
  1132. * on the device, then process the first request from dd->hash_queue.
  1133. *
  1134. * Returns: see s5p_hash_final below.
  1135. */
  1136. static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
  1137. struct ahash_request *req)
  1138. {
  1139. struct crypto_async_request *async_req, *backlog;
  1140. struct s5p_hash_reqctx *ctx;
  1141. unsigned long flags;
  1142. int err = 0, ret = 0;
  1143. retry:
  1144. spin_lock_irqsave(&dd->hash_lock, flags);
  1145. if (req)
  1146. ret = ahash_enqueue_request(&dd->hash_queue, req);
  1147. if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
  1148. spin_unlock_irqrestore(&dd->hash_lock, flags);
  1149. return ret;
  1150. }
  1151. backlog = crypto_get_backlog(&dd->hash_queue);
  1152. async_req = crypto_dequeue_request(&dd->hash_queue);
  1153. if (async_req)
  1154. set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
  1155. spin_unlock_irqrestore(&dd->hash_lock, flags);
  1156. if (!async_req)
  1157. return ret;
  1158. if (backlog)
  1159. backlog->complete(backlog, -EINPROGRESS);
  1160. req = ahash_request_cast(async_req);
  1161. dd->hash_req = req;
  1162. ctx = ahash_request_ctx(req);
  1163. err = s5p_hash_prepare_request(req, ctx->op_update);
  1164. if (err || !ctx->total)
  1165. goto out;
  1166. dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
  1167. ctx->op_update, req->nbytes);
  1168. s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
  1169. if (ctx->digcnt)
  1170. s5p_hash_write_iv(req); /* restore hash IV */
  1171. if (ctx->op_update) { /* HASH_OP_UPDATE */
  1172. err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
  1173. if (err != -EINPROGRESS && ctx->finup && !ctx->error)
  1174. /* no final() after finup() */
  1175. err = s5p_hash_xmit_dma(dd, ctx->total, true);
  1176. } else { /* HASH_OP_FINAL */
  1177. err = s5p_hash_xmit_dma(dd, ctx->total, true);
  1178. }
  1179. out:
  1180. if (err != -EINPROGRESS) {
  1181. /* hash_tasklet_cb will not finish it, so do it here */
  1182. s5p_hash_finish_req(req, err);
  1183. req = NULL;
  1184. /*
  1185. * Execute next request immediately if there is anything
  1186. * in queue.
  1187. */
  1188. goto retry;
  1189. }
  1190. return ret;
  1191. }
  1192. /**
  1193. * s5p_hash_tasklet_cb() - hash tasklet
  1194. * @data: ptr to s5p_aes_dev
  1195. */
  1196. static void s5p_hash_tasklet_cb(unsigned long data)
  1197. {
  1198. struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
  1199. if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
  1200. s5p_hash_handle_queue(dd, NULL);
  1201. return;
  1202. }
  1203. if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
  1204. if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
  1205. &dd->hash_flags)) {
  1206. s5p_hash_update_dma_stop(dd);
  1207. }
  1208. if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
  1209. &dd->hash_flags)) {
  1210. /* hash or semi-hash ready */
  1211. clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
  1212. goto finish;
  1213. }
  1214. }
  1215. return;
  1216. finish:
  1217. /* finish current request */
  1218. s5p_hash_finish_req(dd->hash_req, 0);
  1219. /* If we are not busy, process next req */
  1220. if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
  1221. s5p_hash_handle_queue(dd, NULL);
  1222. }
  1223. /**
  1224. * s5p_hash_enqueue() - enqueue request
  1225. * @req: AHASH request
  1226. * @op: operation UPDATE (true) or FINAL (false)
  1227. *
  1228. * Returns: see s5p_hash_final below.
  1229. */
  1230. static int s5p_hash_enqueue(struct ahash_request *req, bool op)
  1231. {
  1232. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1233. struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  1234. ctx->op_update = op;
  1235. return s5p_hash_handle_queue(tctx->dd, req);
  1236. }
  1237. /**
  1238. * s5p_hash_update() - process the hash input data
  1239. * @req: AHASH request
  1240. *
  1241. * If the request fits in the buffer, copy it and return immediately;
  1242. * otherwise enqueue it with OP_UPDATE.
  1243. *
  1244. * Returns: see s5p_hash_final below.
  1245. */
  1246. static int s5p_hash_update(struct ahash_request *req)
  1247. {
  1248. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1249. if (!req->nbytes)
  1250. return 0;
  1251. if (ctx->bufcnt + req->nbytes <= BUFLEN) {
  1252. scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
  1253. 0, req->nbytes, 0);
  1254. ctx->bufcnt += req->nbytes;
  1255. return 0;
  1256. }
  1257. return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
  1258. }
  1259. /**
  1260. * s5p_hash_shash_digest() - calculate shash digest
  1261. * @tfm: crypto transformation
  1262. * @flags: tfm flags
  1263. * @data: input data
  1264. * @len: length of data
  1265. * @out: output buffer
  1266. */
  1267. static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
  1268. const u8 *data, unsigned int len, u8 *out)
  1269. {
  1270. SHASH_DESC_ON_STACK(shash, tfm);
  1271. shash->tfm = tfm;
  1272. shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
  1273. return crypto_shash_digest(shash, data, len, out);
  1274. }
  1275. /**
  1276. * s5p_hash_final_shash() - calculate shash digest
  1277. * @req: AHASH request
  1278. */
  1279. static int s5p_hash_final_shash(struct ahash_request *req)
  1280. {
  1281. struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  1282. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1283. return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
  1284. ctx->buffer, ctx->bufcnt, req->result);
  1285. }
  1286. /**
  1287. * s5p_hash_final() - close up hash and calculate digest
  1288. * @req: AHASH request
  1289. *
  1290. * Note: in the final op, req->src does not contain any data, although
  1291. * req->nbytes can be non-zero.
  1292. *
  1293. * If there were no input data processed yet and the buffered hash data is
  1294. * less than BUFLEN (64) then calculate the final hash immediately by using
  1295. * SW algorithm fallback.
  1296. *
  1297. * Otherwise enqueue the current AHASH request with the OP_FINAL operation and
  1298. * finalize the hash message in HW. Note that if digcnt != 0 then there was a
  1299. * previous update op, so there are always some buffered bytes in ctx->buffer,
  1300. * which means that ctx->bufcnt != 0.
  1301. *
  1302. * Returns:
  1303. * 0 if the request has been processed immediately,
  1304. * -EINPROGRESS if the operation has been queued for later execution or is set
  1305. * to processing by HW,
  1306. * -EBUSY if queue is full and request should be resubmitted later,
  1307. * other negative values denote an error.
  1308. */
  1309. static int s5p_hash_final(struct ahash_request *req)
  1310. {
  1311. struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
  1312. ctx->finup = true;
  1313. if (ctx->error)
  1314. return -EINVAL; /* uncompleted hash is not needed */
  1315. if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
  1316. return s5p_hash_final_shash(req);
  1317. return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
  1318. }
/**
 * s5p_hash_finup() - process last req->src and calculate digest
 * @req: AHASH request containing the last update data
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_finup(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->finup = true;

	err1 = s5p_hash_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be called always to clean up resources even if
	 * update() failed, except when it returned -EINPROGRESS or -EBUSY;
	 * for small inputs it also calculates the digest via the fallback.
	 */
	err2 = s5p_hash_final(req);

	return err1 ?: err2;
}

/**
 * s5p_hash_init() - initialize AHASH request context
 * @req: AHASH request
 *
 * Init async hash request context.
 */
static int s5p_hash_init(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);

	ctx->dd = tctx->dd;
	ctx->error = false;
	ctx->finup = false;
	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->skip = 0;

	dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_MD5;
		ctx->nregs = HASH_MD5_MAX_REG;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA1;
		ctx->nregs = HASH_SHA1_MAX_REG;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA256;
		ctx->nregs = HASH_SHA256_MAX_REG;
		break;
	default:
		ctx->error = true;
		return -EINVAL;
	}

	return 0;
}

/**
 * s5p_hash_digest() - calculate digest from req->src
 * @req: AHASH request
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_digest(struct ahash_request *req)
{
	return s5p_hash_init(req) ?: s5p_hash_finup(req);
}

/**
 * s5p_hash_cra_init_alg() - init crypto alg transformation
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	tctx->dd = s5p_dev;
	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("fallback alloc fails for '%s'\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct s5p_hash_reqctx) + BUFLEN);

	return 0;
}

/**
 * s5p_hash_cra_init() - init crypto tfm
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init(struct crypto_tfm *tfm)
{
	return s5p_hash_cra_init_alg(tfm);
}

/**
 * s5p_hash_cra_exit() - exit crypto tfm
 * @tfm: crypto transformation
 *
 * free allocated fallback
 */
static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;
}

/**
 * s5p_hash_export() - export hash state
 * @req: AHASH request
 * @out: buffer for exported state
 */
static int s5p_hash_export(struct ahash_request *req, void *out)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);

	return 0;
}

/**
 * s5p_hash_import() - import hash state
 * @req: AHASH request
 * @in: buffer with state to be imported from
 */
static int s5p_hash_import(struct ahash_request *req, const void *in)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	const struct s5p_hash_reqctx *ctx_in = in;

	memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
	if (ctx_in->bufcnt > BUFLEN) {
		ctx->error = true;
		return -EINVAL;
	}

	ctx->dd = tctx->dd;
	ctx->error = false;

	return 0;
}

static struct ahash_alg algs_sha1_md5_sha256[] = {
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "sha1",
		.cra_driver_name = "exynos-sha1",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
},
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = MD5_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "md5",
		.cra_driver_name = "exynos-md5",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
},
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "sha256",
		.cra_driver_name = "exynos-sha256",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
}
};

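/**
 * s5p_set_aes() - program IV, counter and key into the AES block
 * @dev: AES device structure
 * @key: AES key buffer
 * @iv: IV for CBC mode, may be NULL
 * @ctr: counter for CTR mode, may be NULL
 * @keylen: AES key length in bytes
 *
 * The key is placed so that it ends at the last key-data register: 256-bit
 * keys start at KEY_DATA(0), 192-bit keys at KEY_DATA(2) and 128-bit keys
 * at KEY_DATA(4).
 */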
static void s5p_set_aes(struct s5p_aes_dev *dev,
			const u8 *key, const u8 *iv, const u8 *ctr,
			unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);

	if (ctr)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}

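/**
 * s5p_is_sg_aligned() - check if all scatterlist entries are block aligned
 * @sg: head of the scatterlist to check
 *
 * Returns true when every entry's length is a multiple of AES_BLOCK_SIZE;
 * callers make a linear copy of the data otherwise.
 */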
static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}

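/**
 * s5p_set_indata_start() - prepare the source scatterlist for DMA
 * @dev: device structure
 * @req: ablkcipher request
 *
 * If any source entry is not block aligned, a bounce copy is made first;
 * on failure the copy is freed again.
 */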
static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}

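/**
 * s5p_set_outdata_start() - prepare the destination scatterlist for DMA
 * @dev: device structure
 * @req: ablkcipher request
 *
 * Mirrors s5p_set_indata_start() for req->dst.
 */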
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}

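/**
 * s5p_aes_crypt_start() - configure the hardware and start a transfer
 * @dev: device structure
 * @mode: FLAGS_AES_* flags for this request
 *
 * Builds the AES_CONTROL word (chaining mode, direction, key size, byte
 * swapping), programs key/IV/counter and the DMA scatterlists, then enables
 * the feed-control interrupts. On setup failure the partially prepared DMA
 * state is torn down and the request is completed with the error.
 */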
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct ablkcipher_request *req = dev->req;
	u32 aes_control;
	unsigned long flags;
	int err;
	u8 *iv, *ctr;

	/* This sets bit [13:12] to 00, which selects 128-bit counter */
	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
		iv = req->info;
		ctr = NULL;
	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
		aes_control |= SSS_AES_CHAIN_MODE_CTR;
		iv = NULL;
		ctr = req->info;
	} else {
		iv = NULL; /* AES_ECB */
		ctr = NULL;
	}

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* as a variant it is possible to use byte swapping on DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);

	s5p_set_dma_indata(dev, dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);
}

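/**
 * s5p_tasklet_cb() - AES queue worker
 * @data: device structure, cast to unsigned long
 *
 * Dequeues the next request (notifying any backlogged request first) and
 * starts it on the hardware; clears the busy flag when the queue is empty.
 */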
static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

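/**
 * s5p_aes_handle_req() - enqueue an AES request
 * @dev: device structure
 * @req: ablkcipher request
 *
 * Adds the request to the queue and schedules the tasklet unless the device
 * is already busy with earlier requests.
 */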
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return err;
	}
	dev->busy = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

	return err;
}

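/**
 * s5p_aes_crypt() - common entry point for the ECB/CBC/CTR handlers
 * @req: ablkcipher request
 * @mode: FLAGS_AES_* flags describing chaining mode and direction
 *
 * Rejects requests that are not a whole number of AES blocks (CTR mode is
 * exempt), records the mode in the request context and queues the request.
 */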
static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
	    ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
		dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

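/**
 * s5p_aes_setkey() - set the AES key
 * @cipher: cipher transformation
 * @key: key buffer
 * @keylen: key length, must be 16, 24 or 32 bytes
 *
 * The key is only cached in the tfm context here; it is written to the
 * hardware registers when a request is started in s5p_aes_crypt_start().
 */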
static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CTR);
}

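/* Bind the transformation to the single SSS device and set the request size. */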
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = s5p_dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

	return 0;
}

static struct crypto_alg algs[] = {
	{
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-s5p",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.cra_alignmask = 0x0f,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = s5p_aes_setkey,
			.encrypt = s5p_aes_ecb_encrypt,
			.decrypt = s5p_aes_ecb_decrypt,
		}
	},
	{
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-s5p",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.cra_alignmask = 0x0f,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = s5p_aes_setkey,
			.encrypt = s5p_aes_cbc_encrypt,
			.decrypt = s5p_aes_cbc_decrypt,
		}
	},
	{
		.cra_name = "ctr(aes)",
		.cra_driver_name = "ctr-aes-s5p",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.cra_alignmask = 0x0f,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = s5p_aes_setkey,
			.encrypt = s5p_aes_ctr_crypt,
			.decrypt = s5p_aes_ctr_crypt,
		}
	},
};

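/**
 * s5p_aes_probe() - probe the SSS block
 * @pdev: platform device
 *
 * Maps the register window (optionally extended to cover the HASH registers
 * on Exynos), enables the clock, requests the feed-control interrupt and
 * registers the AES algorithms, plus the hash algorithms when the HASH part
 * is usable.
 */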
static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err = -ENODEV;
	const struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;
	unsigned int hash_i;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	variant = find_s5p_sss_version(pdev);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/*
	 * Note: HASH and PRNG use the same registers in secss, so they must
	 * not overwrite each other. HASH is therefore dropped when
	 * CONFIG_EXYNOS_RNG is enabled in the config. A larger region is
	 * needed for the HASH registers in secss; the resource as described
	 * covers only AES/DES.
	 */
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
		if (variant == &exynos_aes_data) {
			res->end += 0x300;
			pdata->use_hash = true;
		}
	}

	pdata->res = res;
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr)) {
		if (!pdata->use_hash)
			return PTR_ERR(pdata->ioaddr);
		/* try AES without HASH */
		res->end -= 0x300;
		pdata->use_hash = false;
		pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(pdata->ioaddr))
			return PTR_ERR(pdata->ioaddr);
	}

	pdata->clk = devm_clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
		return err;
	}

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->hash_lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	if (pdata->use_hash) {
		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
			     (unsigned long)pdata);
		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);

		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
		     hash_i++) {
			struct ahash_alg *alg;

			alg = &algs_sha1_md5_sha256[hash_i];
			err = crypto_register_ahash(alg);
			if (err) {
				dev_err(dev, "can't register '%s': %d\n",
					alg->halg.base.cra_driver_name, err);
				goto err_hash;
			}
		}
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_hash:
	for (j = hash_i - 1; j >= 0; j--)
		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);

	tasklet_kill(&pdata->hash_tasklet);
	res->end -= 0x300;

err_algs:
	if (i < ARRAY_SIZE(algs))
		dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
			err);

	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;

	return err;
}

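/**
 * s5p_aes_remove() - undo s5p_aes_probe()
 * @pdev: platform device
 *
 * Unregisters the algorithms, kills the tasklets, shrinks the MEM resource
 * back if it was extended for HASH and disables the clock.
 */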
static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&pdata->tasklet);
	if (pdata->use_hash) {
		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);

		pdata->res->end -= 0x300;
		tasklet_kill(&pdata->hash_tasklet);
		pdata->use_hash = false;
	}

	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe = s5p_aes_probe,
	.remove = s5p_aes_remove,
	.driver = {
		.name = "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");