/*
 * Cryptographic API.
 * Support for Nomadik hardware crypto engine.
 *
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) "hashX hashX: " fmt

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <linux/platform_data/crypto-ux500.h>

#include "hash_alg.h"

static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
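
/*
 * Usage note (a sketch, not from the original source): since hash_mode is a
 * module parameter, DMA mode can be selected at load time, e.g. with
 * "modprobe ux500_hash hash_mode=1" (the module name is assumed from the
 * build and may differ).
 */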

/* HMAC-SHA1, no key */
static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
        0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
        0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
        0x70, 0x69, 0x0e, 0x1d
};

/* HMAC-SHA256, no key */
static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
        0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
        0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
        0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
        0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};

/**
 * struct hash_driver_data - data specific to the driver.
 *
 * @device_list: A list of registered devices to choose from.
 * @device_allocation: A semaphore initialized with number of devices.
 */
struct hash_driver_data {
        struct klist device_list;
        struct semaphore device_allocation;
};

static struct hash_driver_data driver_data;

/* Declaration of functions */

/**
 * hash_messagepad - Pads a message and writes the NBLW bits.
 * @device_data: Structure for the hash device.
 * @message: Last word of a message.
 * @index_bytes: The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
                            const u32 *message, u8 index_bytes);

/**
 * release_hash_device - Releases a previously allocated hash device.
 * @device_data: Structure for the hash device.
 */
static void release_hash_device(struct hash_device_data *device_data)
{
        spin_lock(&device_data->ctx_lock);
        device_data->current_ctx->device = NULL;
        device_data->current_ctx = NULL;
        spin_unlock(&device_data->ctx_lock);

        /*
         * The down_interruptible part for this semaphore is called in
         * hash_get_device_data.
         */
        up(&driver_data.device_allocation);
}

static void hash_dma_setup_channel(struct hash_device_data *device_data,
                                   struct device *dev)
{
        struct hash_platform_data *platform_data = dev->platform_data;
        struct dma_slave_config conf = {
                .direction = DMA_MEM_TO_DEV,
                .dst_addr = device_data->phybase + HASH_DMA_FIFO,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
                .dst_maxburst = 16,
        };

        dma_cap_zero(device_data->dma.mask);
        dma_cap_set(DMA_SLAVE, device_data->dma.mask);

        device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
        device_data->dma.chan_mem2hash =
                dma_request_channel(device_data->dma.mask,
                                    platform_data->dma_filter,
                                    device_data->dma.cfg_mem2hash);
        dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);

        init_completion(&device_data->dma.complete);
}

static void hash_dma_callback(void *data)
{
        struct hash_ctx *ctx = data;

        complete(&ctx->device->dma.complete);
}

static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
                                 int len, enum dma_data_direction direction)
{
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *channel = NULL;
        dma_cookie_t cookie;

        if (direction != DMA_TO_DEVICE) {
                dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
                        __func__);
                return -EFAULT;
        }
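
        /*
         * Pad the last scatterlist entry up to the DMA alignment size;
         * hash_get_nents() has already verified that only the last entry
         * may be unaligned.
         */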
        sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

        channel = ctx->device->dma.chan_mem2hash;
        ctx->device->dma.sg = sg;
        ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
                                             ctx->device->dma.sg,
                                             ctx->device->dma.nents,
                                             direction);
        if (!ctx->device->dma.sg_len) {
                dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
                        __func__);
                return -EFAULT;
        }

        dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
                __func__);
        desc = dmaengine_prep_slave_sg(channel,
                                       ctx->device->dma.sg,
                                       ctx->device->dma.sg_len,
                                       direction,
                                       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(ctx->device->dev,
                        "%s: dmaengine_prep_slave_sg() failed!\n", __func__);
                return -EFAULT;
        }

        desc->callback = hash_dma_callback;
        desc->callback_param = ctx;

        cookie = dmaengine_submit(desc);
        dma_async_issue_pending(channel);

        return 0;
}

static void hash_dma_done(struct hash_ctx *ctx)
{
        struct dma_chan *chan;

        chan = ctx->device->dma.chan_mem2hash;
        dmaengine_terminate_all(chan);
        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
                     ctx->device->dma.sg_len, DMA_TO_DEVICE);
}

static int hash_dma_write(struct hash_ctx *ctx,
                          struct scatterlist *sg, int len)
{
        int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);

        if (error) {
                dev_dbg(ctx->device->dev,
                        "%s: hash_set_dma_transfer() failed\n", __func__);
                return error;
        }

        return len;
}

/**
 * get_empty_message_digest - Returns a pre-calculated digest for
 * the empty message.
 * @device_data: Structure for the hash device.
 * @zero_hash: Buffer to return the empty message digest.
 * @zero_hash_size: Hash size of the empty message digest.
 * @zero_digest: True if a pre-calculated digest was returned.
 */
static int get_empty_message_digest(
                struct hash_device_data *device_data,
                u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
        int ret = 0;
        struct hash_ctx *ctx = device_data->current_ctx;
        *zero_digest = false;

        /* Caller is responsible for ctx != NULL. */

        if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
                if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
                        memcpy(zero_hash, &sha1_zero_message_hash[0],
                               SHA1_DIGEST_SIZE);
                        *zero_hash_size = SHA1_DIGEST_SIZE;
                        *zero_digest = true;
                } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
                        memcpy(zero_hash, &sha256_zero_message_hash[0],
                               SHA256_DIGEST_SIZE);
                        *zero_hash_size = SHA256_DIGEST_SIZE;
                        *zero_digest = true;
                } else {
                        dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
                                __func__);
                        ret = -EINVAL;
                        goto out;
                }
        } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
                if (!ctx->keylen) {
                        if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
                                memcpy(zero_hash, &zero_message_hmac_sha1[0],
                                       SHA1_DIGEST_SIZE);
                                *zero_hash_size = SHA1_DIGEST_SIZE;
                                *zero_digest = true;
                        } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
                                memcpy(zero_hash, &zero_message_hmac_sha256[0],
                                       SHA256_DIGEST_SIZE);
                                *zero_hash_size = SHA256_DIGEST_SIZE;
                                *zero_digest = true;
                        } else {
                                dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
                                        __func__);
                                ret = -EINVAL;
                                goto out;
                        }
                } else {
                        dev_dbg(device_data->dev,
                                "%s: Continue hash calculation, since hmac key available\n",
                                __func__);
                }
        }
out:

        return ret;
}

/**
 * hash_disable_power - Request to disable power and clock.
 * @device_data: Structure for the hash device.
 * @save_device_state: If true, saves the current hw state.
 *
 * This function requests disabling power (regulator) and clock,
 * and can also save the current hw state.
 */
static int hash_disable_power(struct hash_device_data *device_data,
                              bool save_device_state)
{
        int ret = 0;
        struct device *dev = device_data->dev;

        spin_lock(&device_data->power_state_lock);
        if (!device_data->power_state)
                goto out;

        if (save_device_state) {
                hash_save_state(device_data,
                                &device_data->state);
                device_data->restore_dev_state = true;
        }

        clk_disable(device_data->clk);
        ret = regulator_disable(device_data->regulator);
        if (ret)
                dev_err(dev, "%s: regulator_disable() failed!\n", __func__);

        device_data->power_state = false;

out:
        spin_unlock(&device_data->power_state_lock);

        return ret;
}

/**
 * hash_enable_power - Request to enable power and clock.
 * @device_data: Structure for the hash device.
 * @restore_device_state: If true, restores a previously saved hw state.
 *
 * This function requests enabling power (regulator) and clock,
 * and can also restore a previously saved hw state.
 */
static int hash_enable_power(struct hash_device_data *device_data,
                             bool restore_device_state)
{
        int ret = 0;
        struct device *dev = device_data->dev;

        spin_lock(&device_data->power_state_lock);
        if (!device_data->power_state) {
                ret = regulator_enable(device_data->regulator);
                if (ret) {
                        dev_err(dev, "%s: regulator_enable() failed!\n",
                                __func__);
                        goto out;
                }
                ret = clk_enable(device_data->clk);
                if (ret) {
                        dev_err(dev, "%s: clk_enable() failed!\n", __func__);
                        /* Keep the clk_enable() error code in ret. */
                        regulator_disable(device_data->regulator);
                        goto out;
                }
                device_data->power_state = true;
        }

        if (device_data->restore_dev_state && restore_device_state) {
                device_data->restore_dev_state = false;
                hash_resume_state(device_data, &device_data->state);
        }

out:
        spin_unlock(&device_data->power_state_lock);

        return ret;
}

/**
 * hash_get_device_data - Checks for an available hash device and return it.
 * @ctx: Structure for the hash context.
 * @device_data: Structure for the hash device.
 *
 * This function checks for an available hash device and returns it to
 * the caller.
 * Note! Caller needs to release the device, calling up().
 */
static int hash_get_device_data(struct hash_ctx *ctx,
                                struct hash_device_data **device_data)
{
        int ret;
        struct klist_iter device_iterator;
        struct klist_node *device_node;
        struct hash_device_data *local_device_data = NULL;

        /* Wait until a device is available */
        ret = down_interruptible(&driver_data.device_allocation);
        if (ret)
                return ret; /* Interrupted */

        /* Select a device */
        klist_iter_init(&driver_data.device_list, &device_iterator);
        device_node = klist_next(&device_iterator);
        while (device_node) {
                local_device_data = container_of(device_node,
                                struct hash_device_data, list_node);
                spin_lock(&local_device_data->ctx_lock);
                /* current_ctx allocates a device, NULL = unallocated */
                if (local_device_data->current_ctx) {
                        device_node = klist_next(&device_iterator);
                } else {
                        local_device_data->current_ctx = ctx;
                        ctx->device = local_device_data;
                        spin_unlock(&local_device_data->ctx_lock);
                        break;
                }
                spin_unlock(&local_device_data->ctx_lock);
        }
        klist_iter_exit(&device_iterator);

        if (!device_node) {
                /*
                 * No free device found.
                 * Since we allocated a device with down_interruptible, this
                 * should not be able to happen.
                 * Number of available devices, which are contained in
                 * device_allocation, is therefore decremented by not doing
                 * an up(device_allocation).
                 */
                return -EBUSY;
        }

        *device_data = local_device_data;

        return 0;
}

/**
 * hash_hw_write_key - Writes the key to the hardware registers.
 *
 * @device_data: Structure for the hash device.
 * @key: Key to be written.
 * @keylen: The length of the key.
 *
 * Note! This function DOES NOT write to the NBLW registry, even though
 * specified in the hw design spec. Either due to incorrect info in the
 * spec or due to a bug in the hw.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
                              const u8 *key, unsigned int keylen)
{
        u32 word = 0;
        int nwords = 1;

        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

        while (keylen >= 4) {
                u32 *key_word = (u32 *)key;

                HASH_SET_DIN(key_word, nwords);
                keylen -= 4;
                key += 4;
        }

        /* Take care of the remaining bytes in the last word */
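        /*
         * The remaining 1-3 bytes are packed little-endian into one word,
         * e.g. trailing key bytes {0xaa, 0xbb, 0xcc} yield word 0x00ccbbaa.
         */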
        if (keylen) {
                word = 0;
                while (keylen) {
                        word |= (key[keylen - 1] << (8 * (keylen - 1)));
                        keylen--;
                }

                HASH_SET_DIN(&word, nwords);
        }

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();

        HASH_SET_DCAL;

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();
}

/**
 * init_hash_hw - Initialise the hash hardware for a new calculation.
 * @device_data: Structure for the hash device.
 * @ctx: The hash context.
 *
 * This function will enable the bits needed to clear and start a new
 * calculation.
 */
static int init_hash_hw(struct hash_device_data *device_data,
                        struct hash_ctx *ctx)
{
        int ret = 0;

        ret = hash_setconfiguration(device_data, &ctx->config);
        if (ret) {
                dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
                        __func__);
                return ret;
        }

        hash_begin(device_data, ctx);

        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
                hash_hw_write_key(device_data, ctx->key, ctx->keylen);

        return ret;
}

/**
 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
 *
 * @sg: Scatterlist.
 * @size: Size in bytes.
 * @aligned: True if sg data aligned to work in DMA mode.
 *
 * Returns the number of entries needed to cover @size bytes, or -EFAULT if
 * the scatterlist is shorter than @size.
 */
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
        int nents = 0;
        bool aligned_data = true;

        while (size > 0 && sg) {
                nents++;
                size -= sg->length;

                /* hash_set_dma_transfer will align last nent */
                if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
                    (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
                        aligned_data = false;

                sg = sg_next(sg);
        }

        if (aligned)
                *aligned = aligned_data;

        if (size != 0)
                return -EFAULT;

        return nents;
}

/**
 * hash_dma_valid_data - checks for dma valid sg data.
 * @sg: Scatterlist.
 * @datasize: Datasize in bytes.
 *
 * NOTE! This function checks for dma valid sg data, since the DMA engine
 * only accepts data sizes that are a whole number of words.
 */
static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
        bool aligned;

        /* Need to include at least one nent, else error */
        if (hash_get_nents(sg, datasize, &aligned) < 1)
                return false;

        return aligned;
}

/**
 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 *
 * Initialize structures.
 */
static int hash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

        if (!ctx->key)
                ctx->keylen = 0;

        memset(&req_ctx->state, 0, sizeof(struct hash_state));
        req_ctx->updated = 0;
        if (hash_mode == HASH_MODE_DMA) {
                if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
                        req_ctx->dma_mode = false; /* Don't use DMA */

                        pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
                                 __func__, HASH_DMA_ALIGN_SIZE);
                } else {
                        if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
                            hash_dma_valid_data(req->src, req->nbytes)) {
                                req_ctx->dma_mode = true;
                        } else {
                                req_ctx->dma_mode = false;
                                pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
                                         __func__,
                                         HASH_DMA_PERFORMANCE_MIN_SIZE);
                        }
                }
        }
        return 0;
}

/**
 * hash_processblock - This function processes a single block of 512 bits
 * (64 bytes), word aligned, starting at message.
 * @device_data: Structure for the hash device.
 * @message: Block (512 bits) of message to be written to
 *           the HASH hardware.
 * @length: Message length in bytes.
 */
static void hash_processblock(struct hash_device_data *device_data,
                              const u32 *message, int length)
{
        int len = length / HASH_BYTES_PER_WORD;
        /*
         * NBLW bits. Reset the number of bits in last word (NBLW).
         */
        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

        /*
         * Write message data to the HASH_DIN register.
         */
        HASH_SET_DIN(message, len);
}

/**
 * hash_messagepad - Pads a message and writes the NBLW bits.
 * @device_data: Structure for the hash device.
 * @message: Last word of a message.
 * @index_bytes: The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
                            const u32 *message, u8 index_bytes)
{
        int nwords = 1;

        /*
         * Clear hash str register, only clear NBLW
         * since DCAL will be reset by hardware.
         */
        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

        /* Main loop */
        while (index_bytes >= 4) {
                HASH_SET_DIN(message, nwords);
                index_bytes -= 4;
                message++;
        }

        if (index_bytes)
                HASH_SET_DIN(message, nwords);

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();

        /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
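        /*
         * After the main loop index_bytes is 0..3, so NBLW ends up as 0, 8,
         * 16 or 24 valid bits in the last word.
         */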
        HASH_SET_NBLW(index_bytes * 8);
        dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
                __func__, readl_relaxed(&device_data->base->din),
                readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
        HASH_SET_DCAL;
        dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
                __func__, readl_relaxed(&device_data->base->din),
                readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();
}

/**
 * hash_incrementlength - Increments the length of the current message.
 * @ctx: Hash context
 * @incr: Number of bytes just processed.
 *
 * Overflow cannot occur, because conditions for overflow are checked in
 * hash_hw_update.
 */
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
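        /*
         * low_word and high_word together form a 64-bit message-length
         * counter; a wrap of low_word carries into high_word.
         */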
        ctx->state.length.low_word += incr;

        /* Check for wrap-around */
        if (ctx->state.length.low_word < incr)
                ctx->state.length.high_word++;
}

/**
 * hash_setconfiguration - Sets the required configuration for the hash
 * hardware.
 * @device_data: Structure for the hash device.
 * @config: Pointer to a configuration structure.
 */
int hash_setconfiguration(struct hash_device_data *device_data,
                          struct hash_config *config)
{
        int ret = 0;

        if (config->algorithm != HASH_ALGO_SHA1 &&
            config->algorithm != HASH_ALGO_SHA256)
                return -EPERM;

        /*
         * DATAFORM bits. They define how the data written to HASH_DIN is
         * interpreted; this driver always uses the 8-bit data format.
         */
        HASH_SET_DATA_FORMAT(config->data_format);

        /*
         * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256.
         */
        switch (config->algorithm) {
        case HASH_ALGO_SHA1:
                HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
                break;

        case HASH_ALGO_SHA256:
                HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
                break;

        default:
                dev_err(device_data->dev, "%s: Incorrect algorithm\n",
                        __func__);
                return -EPERM;
        }

        /*
         * MODE bit. This bit selects between HASH or HMAC mode for the
         * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
         */
        if (HASH_OPER_MODE_HASH == config->oper_mode)
                HASH_CLEAR_BITS(&device_data->base->cr,
                                HASH_CR_MODE_MASK);
        else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
                HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
                if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
                        /* Truncate key to blocksize */
                        dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
                        HASH_SET_BITS(&device_data->base->cr,
                                      HASH_CR_LKEY_MASK);
                } else {
                        dev_dbg(device_data->dev, "%s: LKEY cleared\n",
                                __func__);
                        HASH_CLEAR_BITS(&device_data->base->cr,
                                        HASH_CR_LKEY_MASK);
                }
        } else {        /* Wrong hash mode */
                ret = -EPERM;
                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
                        __func__);
        }
        return ret;
}

/**
 * hash_begin - This routine resets some globals and initializes the hash
 * hardware.
 * @device_data: Structure for the hash device.
 * @ctx: Hash context.
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
        /* HW and SW initializations */
        /* Note: there is no need to initialize buffer and digest members */

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();

        /*
         * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
         * prepare the HASH accelerator to compute the message digest of a
         * new message.
         */
        HASH_INITIALIZE;

        /*
         * NBLW bits. Reset the number of bits in last word (NBLW).
         */
        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}

static int hash_process_data(struct hash_device_data *device_data,
                             struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
                             int msg_length, u8 *data_buffer, u8 *buffer,
                             u8 *index)
{
        int ret = 0;
        u32 count;
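
        /*
         * Gather incoming data into the block buffer until a full
         * HASH_BLOCK_SIZE block is available, then feed whole blocks to the
         * hardware, saving/restoring the hw state around each block so the
         * device can be shared between requests.
         */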
        do {
                if ((*index + msg_length) < HASH_BLOCK_SIZE) {
                        for (count = 0; count < msg_length; count++) {
                                buffer[*index + count] =
                                        *(data_buffer + count);
                        }
                        *index += msg_length;
                        msg_length = 0;
                } else {
                        if (req_ctx->updated) {
                                ret = hash_resume_state(device_data,
                                                &device_data->state);
                                if (ret) {
                                        dev_err(device_data->dev,
                                                "%s: hash_resume_state() failed!\n",
                                                __func__);
                                        goto out;
                                }
                                memmove(req_ctx->state.buffer,
                                        device_data->state.buffer,
                                        HASH_BLOCK_SIZE);
                        } else {
                                ret = init_hash_hw(device_data, ctx);
                                if (ret) {
                                        dev_err(device_data->dev,
                                                "%s: init_hash_hw() failed!\n",
                                                __func__);
                                        goto out;
                                }
                                req_ctx->updated = 1;
                        }
                        /*
                         * If 'data_buffer' is four byte aligned and
                         * local buffer does not have any data, we can
                         * write data directly from 'data_buffer' to
                         * HW peripheral, otherwise we first copy data
                         * to a local buffer
                         */
                        if ((0 == (((u32)data_buffer) % 4)) &&
                            (0 == *index))
                                hash_processblock(device_data,
                                                  (const u32 *)data_buffer,
                                                  HASH_BLOCK_SIZE);
                        else {
                                for (count = 0;
                                     count < (u32)(HASH_BLOCK_SIZE - *index);
                                     count++) {
                                        buffer[*index + count] =
                                                *(data_buffer + count);
                                }
                                hash_processblock(device_data,
                                                  (const u32 *)buffer,
                                                  HASH_BLOCK_SIZE);
                        }
                        hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
                        data_buffer += (HASH_BLOCK_SIZE - *index);

                        msg_length -= (HASH_BLOCK_SIZE - *index);
                        *index = 0;

                        ret = hash_save_state(device_data,
                                        &device_data->state);
                        if (ret) {
                                dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
                                        __func__);
                                goto out;
                        }
                        memmove(device_data->state.buffer,
                                req_ctx->state.buffer,
                                HASH_BLOCK_SIZE);
                }
        } while (msg_length != 0);
out:

        return ret;
}

/**
 * hash_dma_final - The hash dma final function for SHA1/SHA256.
 * @req: The hash request for the job.
 */
static int hash_dma_final(struct ahash_request *req)
{
        int ret = 0;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct hash_device_data *device_data;
        u8 digest[SHA256_DIGEST_SIZE];
        int bytes_written = 0;

        ret = hash_get_device_data(ctx, &device_data);
        if (ret)
                return ret;

        dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);

        if (req_ctx->updated) {
                ret = hash_resume_state(device_data, &device_data->state);
                if (ret) {
                        dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
                                __func__);
                        goto out;
                }
        }

        if (!req_ctx->updated) {
                ret = hash_setconfiguration(device_data, &ctx->config);
                if (ret) {
                        dev_err(device_data->dev,
                                "%s: hash_setconfiguration() failed!\n",
                                __func__);
                        goto out;
                }

                /* Enable DMA input */
                if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
                        HASH_CLEAR_BITS(&device_data->base->cr,
                                        HASH_CR_DMAE_MASK);
                } else {
                        HASH_SET_BITS(&device_data->base->cr,
                                      HASH_CR_DMAE_MASK);
                        HASH_SET_BITS(&device_data->base->cr,
                                      HASH_CR_PRIVN_MASK);
                }

                HASH_INITIALIZE;

                if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
                        hash_hw_write_key(device_data, ctx->key, ctx->keylen);

                /* Number of bits in last word = (nbytes * 8) % 32 */
                HASH_SET_NBLW((req->nbytes * 8) % 32);
                req_ctx->updated = 1;
        }

        /* Store the nents in the dma struct. */
        ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
        if (ctx->device->dma.nents < 1) {
                dev_err(device_data->dev, "%s: no valid DMA nents\n",
                        __func__);
                ret = ctx->device->dma.nents ?
                        ctx->device->dma.nents : -EFAULT;
                goto out;
        }

        bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
        if (bytes_written != req->nbytes) {
                dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
                        __func__);
                ret = bytes_written;
                goto out;
        }

        wait_for_completion(&ctx->device->dma.complete);
        hash_dma_done(ctx);

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();

        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
                unsigned int keylen = ctx->keylen;
                u8 *key = ctx->key;

                dev_dbg(device_data->dev, "%s: keylen: %d\n",
                        __func__, ctx->keylen);
                hash_hw_write_key(device_data, key, keylen);
        }

        hash_get_digest(device_data, digest, ctx->config.algorithm);
        memcpy(req->result, digest, ctx->digestsize);

out:
        release_hash_device(device_data);

        /* Allocated in setkey, and only used in HMAC. */
        kfree(ctx->key);

        return ret;
}

/**
 * hash_hw_final - The final hash calculation function.
 * @req: The hash request for the job.
 */
static int hash_hw_final(struct ahash_request *req)
{
        int ret = 0;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct hash_device_data *device_data;
        u8 digest[SHA256_DIGEST_SIZE];

        ret = hash_get_device_data(ctx, &device_data);
        if (ret)
                return ret;

        dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);

        if (req_ctx->updated) {
                ret = hash_resume_state(device_data, &device_data->state);
                if (ret) {
                        dev_err(device_data->dev,
                                "%s: hash_resume_state() failed!\n", __func__);
                        goto out;
                }
        } else if (req->nbytes == 0 && ctx->keylen == 0) {
                u8 zero_hash[SHA256_DIGEST_SIZE];
                u32 zero_hash_size = 0;
                bool zero_digest = false;

                /*
                 * Use a pre-calculated empty message digest
                 * (workaround since hw return zeroes, hw bug!?)
                 */
                ret = get_empty_message_digest(device_data, &zero_hash[0],
                                               &zero_hash_size, &zero_digest);
                if (!ret && likely(zero_hash_size == ctx->digestsize) &&
                    zero_digest) {
                        memcpy(req->result, &zero_hash[0], ctx->digestsize);
                        goto out;
                } else if (!ret && !zero_digest) {
                        dev_dbg(device_data->dev,
                                "%s: HMAC zero msg with key, continue...\n",
                                __func__);
                } else {
                        dev_err(device_data->dev,
                                "%s: ret=%d, or wrong digest size? %s\n",
                                __func__, ret,
                                zero_hash_size == ctx->digestsize ?
                                "true" : "false");
                        /* Return error */
                        goto out;
                }
        } else if (req->nbytes == 0 && ctx->keylen > 0) {
                ret = -EPERM;
                dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
                        __func__);
                goto out;
        }

        if (!req_ctx->updated) {
                ret = init_hash_hw(device_data, ctx);
                if (ret) {
                        dev_err(device_data->dev,
                                "%s: init_hash_hw() failed!\n", __func__);
                        goto out;
                }
        }

        if (req_ctx->state.index) {
                hash_messagepad(device_data, req_ctx->state.buffer,
                                req_ctx->state.index);
        } else {
                HASH_SET_DCAL;
                while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                        cpu_relax();
        }

        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
                unsigned int keylen = ctx->keylen;
                u8 *key = ctx->key;

                dev_dbg(device_data->dev, "%s: keylen: %d\n",
                        __func__, ctx->keylen);
                hash_hw_write_key(device_data, key, keylen);
        }

        hash_get_digest(device_data, digest, ctx->config.algorithm);
        memcpy(req->result, digest, ctx->digestsize);

out:
        release_hash_device(device_data);

        /* Allocated in setkey, and only used in HMAC. */
        kfree(ctx->key);

        return ret;
}

/**
 * hash_hw_update - Updates current HASH computation hashing another part of
 * the message.
 * @req: The ahash request, containing the message to be hashed (caller
 *       allocated).
 */
int hash_hw_update(struct ahash_request *req)
{
        int ret = 0;
        u8 index = 0;
        u8 *buffer;
        struct hash_device_data *device_data;
        u8 *data_buffer;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_hash_walk walk;
        int msg_length = crypto_hash_walk_first(req, &walk);

        /* Empty message ("") is correct indata */
        if (msg_length == 0)
                return ret;

        index = req_ctx->state.index;
        buffer = (u8 *)req_ctx->state.buffer;

        /* Check if ctx->state.length + msg_length overflows */
        if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
            HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
                pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
                return -EPERM;
        }

        ret = hash_get_device_data(ctx, &device_data);
        if (ret)
                return ret;

        /* Main loop */
        while (0 != msg_length) {
                data_buffer = walk.data;
                ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
                                        data_buffer, buffer, &index);
                if (ret) {
                        dev_err(device_data->dev, "%s: hash_process_data() failed!\n",
                                __func__);
                        goto out;
                }
                msg_length = crypto_hash_walk_done(&walk, 0);
        }

        req_ctx->state.index = index;
        dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
                __func__, req_ctx->state.index, req_ctx->state.bit_index);

out:
        release_hash_device(device_data);

        return ret;
}

/**
 * hash_resume_state - Function that resumes the state of a calculation.
 * @device_data: Pointer to the device structure.
 * @device_state: The state to be restored in the hash hardware.
 */
int hash_resume_state(struct hash_device_data *device_data,
                      const struct hash_state *device_state)
{
        u32 temp_cr;
        s32 count;
        int hash_mode = HASH_OPER_MODE_HASH;

        if (NULL == device_state) {
                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
                        __func__);
                return -EPERM;
        }

        /* Check correctness of index and length members */
        if (device_state->index > HASH_BLOCK_SIZE ||
            (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
                        __func__);
                return -EPERM;
        }

        /*
         * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
         * prepare the HASH accelerator to compute the message digest of a
         * new message.
         */
        HASH_INITIALIZE;

        temp_cr = device_state->temp_cr;
        writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);

        if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
                hash_mode = HASH_OPER_MODE_HMAC;
        else
                hash_mode = HASH_OPER_MODE_HASH;
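
        /*
         * Only the first 36 context-switch registers are used in plain hash
         * mode; the remaining ones carry HMAC context (hence the count >= 36
         * check below).
         */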
        for (count = 0; count < HASH_CSR_COUNT; count++) {
                if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
                        break;
                writel_relaxed(device_state->csr[count],
                               &device_data->base->csrx[count]);
        }

        writel_relaxed(device_state->csfull, &device_data->base->csfull);
        writel_relaxed(device_state->csdatain, &device_data->base->csdatain);

        writel_relaxed(device_state->str_reg, &device_data->base->str);
        writel_relaxed(temp_cr, &device_data->base->cr);

        return 0;
}

/**
 * hash_save_state - Function that saves the state of hardware.
 * @device_data: Pointer to the device structure.
 * @device_state: The structure where the hardware state should be saved.
 */
int hash_save_state(struct hash_device_data *device_data,
                    struct hash_state *device_state)
{
        u32 temp_cr;
        u32 count;
        int hash_mode = HASH_OPER_MODE_HASH;

        if (NULL == device_state) {
                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
                        __func__);
                return -ENOTSUPP;
        }

        /*
         * Write dummy value to force digest intermediate calculation. This
         * actually makes sure that there isn't any ongoing calculation in
         * the hardware.
         */
        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();

        temp_cr = readl_relaxed(&device_data->base->cr);

        device_state->str_reg = readl_relaxed(&device_data->base->str);

        device_state->din_reg = readl_relaxed(&device_data->base->din);

        if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
                hash_mode = HASH_OPER_MODE_HMAC;
        else
                hash_mode = HASH_OPER_MODE_HASH;
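
        /*
         * As in hash_resume_state(): CSR registers above index 35 only
         * carry HMAC context and are skipped for a plain hash.
         */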
        for (count = 0; count < HASH_CSR_COUNT; count++) {
                if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
                        break;
                device_state->csr[count] =
                        readl_relaxed(&device_data->base->csrx[count]);
        }

        device_state->csfull = readl_relaxed(&device_data->base->csfull);
        device_state->csdatain = readl_relaxed(&device_data->base->csdatain);

        device_state->temp_cr = temp_cr;

        return 0;
}

/**
 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
 * @device_data: Pointer to the device structure.
 */
int hash_check_hw(struct hash_device_data *device_data)
{
        /* Checking Peripheral Ids */
        if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
            HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
            HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
            HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
            HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
            HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
            HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
            HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
                return 0;
        }

        dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
        return -ENOTSUPP;
}

/**
 * hash_get_digest - Gets the digest.
 * @device_data: Pointer to the device structure.
 * @digest: User allocated byte array for the calculated digest.
 * @algorithm: The algorithm in use.
 */
void hash_get_digest(struct hash_device_data *device_data,
                     u8 *digest, int algorithm)
{
        u32 temp_hx_val, count;
        int loop_ctr;

        if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
                dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
                        __func__, algorithm);
                return;
        }

        if (algorithm == HASH_ALGO_SHA1)
                loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
        else
                loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);

        dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n",
                __func__, (u32) digest);

        /* Copy result into digest array */
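        /*
         * Each 32-bit HX register is stored big-endian in the byte array,
         * e.g. a register value of 0x12345678 becomes the bytes
         * 0x12, 0x34, 0x56, 0x78.
         */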
        for (count = 0; count < loop_ctr; count++) {
                temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
                digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
                digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
                digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
                digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
        }
}

/**
 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_update(struct ahash_request *req)
{
        int ret = 0;
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

        if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
                ret = hash_hw_update(req);
        /* Skip update for DMA, all data will be passed to DMA in final */

        if (ret)
                pr_err("%s: hash_hw_update() failed!\n", __func__);

        return ret;
}

/**
 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_final(struct ahash_request *req)
{
        int ret = 0;
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

        pr_debug("%s: data size: %d\n", __func__, req->nbytes);

        if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
                ret = hash_dma_final(req);
        else
                ret = hash_hw_final(req);

        if (ret)
                pr_err("%s: hash_hw/dma_final() failed\n", __func__);

        return ret;
}

static int hash_setkey(struct crypto_ahash *tfm,
                       const u8 *key, unsigned int keylen, int alg)
{
        int ret = 0;
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        /* Freed in final. */
        ctx->key = kmemdup(key, keylen, GFP_KERNEL);
        if (!ctx->key) {
                pr_err("%s: Failed to allocate ctx->key for %d\n",
                       __func__, alg);
                return -ENOMEM;
        }
        ctx->keylen = keylen;

        return ret;
}

static int ahash_sha1_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA1;
        ctx->config.oper_mode = HASH_OPER_MODE_HASH;
        ctx->digestsize = SHA1_DIGEST_SIZE;

        return hash_init(req);
}

static int ahash_sha256_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA256;
        ctx->config.oper_mode = HASH_OPER_MODE_HASH;
        ctx->digestsize = SHA256_DIGEST_SIZE;

        return hash_init(req);
}

static int ahash_sha1_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = ahash_sha1_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int ahash_sha256_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = ahash_sha256_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int ahash_noimport(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

static int ahash_noexport(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int hmac_sha1_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA1;
        ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
        ctx->digestsize = SHA1_DIGEST_SIZE;

        return hash_init(req);
}

static int hmac_sha256_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA256;
        ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
        ctx->digestsize = SHA256_DIGEST_SIZE;

        return hash_init(req);
}

static int hmac_sha1_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = hmac_sha1_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int hmac_sha256_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = hmac_sha256_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int hmac_sha1_setkey(struct crypto_ahash *tfm,
                            const u8 *key, unsigned int keylen)
{
        return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}

static int hmac_sha256_setkey(struct crypto_ahash *tfm,
                              const u8 *key, unsigned int keylen)
{
        return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}

struct hash_algo_template {
        struct hash_config conf;
        struct ahash_alg hash;
};

static int hash_cra_init(struct crypto_tfm *tfm)
{
        struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_alg *alg = tfm->__crt_alg;
        struct hash_algo_template *hash_alg;

        hash_alg = container_of(__crypto_ahash_alg(alg),
                                struct hash_algo_template,
                                hash);

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct hash_req_ctx));

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = hash_alg->conf.algorithm;
        ctx->config.oper_mode = hash_alg->conf.oper_mode;

        ctx->digestsize = hash_alg->hash.halg.digestsize;

        return 0;
}

static struct hash_algo_template hash_algs[] = {
        {
                .conf.algorithm = HASH_ALGO_SHA1,
                .conf.oper_mode = HASH_OPER_MODE_HASH,
                .hash = {
                        .init = hash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .digest = ahash_sha1_digest,
                        .export = ahash_noexport,
                        .import = ahash_noimport,
                        .halg.digestsize = SHA1_DIGEST_SIZE,
                        .halg.statesize = sizeof(struct hash_ctx),
                        .halg.base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "sha1-ux500",
                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
                                              CRYPTO_ALG_ASYNC),
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct hash_ctx),
                                .cra_type = &crypto_ahash_type,
                                .cra_init = hash_cra_init,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .conf.algorithm = HASH_ALGO_SHA256,
                .conf.oper_mode = HASH_OPER_MODE_HASH,
                .hash = {
                        .init = hash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .digest = ahash_sha256_digest,
                        .export = ahash_noexport,
                        .import = ahash_noimport,
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.statesize = sizeof(struct hash_ctx),
                        .halg.base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "sha256-ux500",
                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
                                              CRYPTO_ALG_ASYNC),
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct hash_ctx),
                                .cra_type = &crypto_ahash_type,
                                .cra_init = hash_cra_init,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .conf.algorithm = HASH_ALGO_SHA1,
                .conf.oper_mode = HASH_OPER_MODE_HMAC,
                .hash = {
                        .init = hash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .digest = hmac_sha1_digest,
                        .setkey = hmac_sha1_setkey,
                        .export = ahash_noexport,
                        .import = ahash_noimport,
                        .halg.digestsize = SHA1_DIGEST_SIZE,
                        .halg.statesize = sizeof(struct hash_ctx),
                        .halg.base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "hmac-sha1-ux500",
                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
                                              CRYPTO_ALG_ASYNC),
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct hash_ctx),
                                .cra_type = &crypto_ahash_type,
                                .cra_init = hash_cra_init,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .conf.algorithm = HASH_ALGO_SHA256,
                .conf.oper_mode = HASH_OPER_MODE_HMAC,
                .hash = {
                        .init = hash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .digest = hmac_sha256_digest,
                        .setkey = hmac_sha256_setkey,
                        .export = ahash_noexport,
                        .import = ahash_noimport,
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.statesize = sizeof(struct hash_ctx),
                        .halg.base = {
                                .cra_name = "hmac(sha256)",
                                .cra_driver_name = "hmac-sha256-ux500",
                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
                                              CRYPTO_ALG_ASYNC),
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct hash_ctx),
                                .cra_type = &crypto_ahash_type,
                                .cra_init = hash_cra_init,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};
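
/*
 * Usage sketch (not part of this driver): once registered, the algorithms
 * above are reachable through the generic crypto API by cra_name or
 * cra_driver_name, e.g.:
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *
 * followed by the usual ahash_request_alloc()/crypto_ahash_digest() calls.
 */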
/**
 * ahash_algs_register_all - Register all supported ahash algorithms.
 * @device_data: Structure for the hash device.
 */
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
	int ret;
	int i;
	int count;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		ret = crypto_register_ahash(&hash_algs[i].hash);
		if (ret) {
			count = i;
			dev_err(device_data->dev, "%s: alg registration failed\n",
				hash_algs[i].hash.halg.base.cra_driver_name);
			goto unreg;
		}
	}
	return 0;

unreg:
	/* Unregister only the algorithms that were registered successfully. */
	for (i = 0; i < count; i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
	return ret;
}
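
/*
 * Illustrative note (not part of this driver): once registered, other
 * kernel code can request one of these transforms through the generic
 * crypto API, for example
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1-ux500", 0, 0);
 *
 * which selects this driver's implementation by its cra_driver_name.
 */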
/**
 * ahash_algs_unregister_all - Unregister all supported ahash algorithms.
 * @device_data: Structure for the hash device.
 */
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
}
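
/*
 * Note: the remove and shutdown paths below only call
 * ahash_algs_unregister_all() once the device list is empty, so the
 * algorithms stay registered as long as at least one hash device remains.
 */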
/**
 * ux500_hash_probe - Function that probes the hash hardware.
 * @pdev: The platform device.
 */
static int ux500_hash_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res = NULL;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
	if (!device_data) {
		ret = -ENOMEM;
		goto out;
	}

	device_data->dev = dev;
	device_data->current_ctx = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	device_data->phybase = res->start;
	device_data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(device_data->base)) {
		dev_err(dev, "%s: devm_ioremap_resource() failed!\n", __func__);
		ret = PTR_ERR(device_data->base);
		goto out;
	}
	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_lock);

	/* Enable power for HASH1 hardware block */
	device_data->regulator = regulator_get(dev, "v-ape");
	if (IS_ERR(device_data->regulator)) {
		dev_err(dev, "%s: regulator_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->regulator);
		device_data->regulator = NULL;
		goto out;
	}

	/* Enable the clock for HASH1 hardware block */
	device_data->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "%s: devm_clk_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	ret = clk_prepare(device_data->clk);
	if (ret) {
		dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
		goto out_regulator;
	}

	/* Enable device power (and clock) */
	ret = hash_enable_power(device_data, false);
	if (ret) {
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
		goto out_clk_unprepare;
	}

	ret = hash_check_hw(device_data);
	if (ret) {
		dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
		goto out_power;
	}

	if (hash_mode == HASH_MODE_DMA)
		hash_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);

	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);
	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);

	ret = ahash_algs_register_all(device_data);
	if (ret) {
		dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
			__func__);
		goto out_power;
	}

	dev_info(dev, "successfully registered\n");

	return 0;

out_power:
	hash_disable_power(device_data, false);

out_clk_unprepare:
	clk_unprepare(device_data->clk);

out_regulator:
	regulator_put(device_data->regulator);

out:
	return ret;
}
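
/*
 * Device-pool bookkeeping: driver_data.device_allocation starts at zero in
 * ux500_hash_mod_init(), each successful probe does an up(), and users of
 * the hardware claim a device with a down(). ux500_hash_remove() below uses
 * down_trylock() so an in-use device cannot be removed.
 */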
/**
 * ux500_hash_remove - Function that removes the hash device from the platform.
 * @pdev: The platform device.
 */
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (device_data->current_ctx) {
		/* The device is busy */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}

	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "%s: hash_disable_power() failed\n",
			__func__);

	clk_unprepare(device_data->clk);
	regulator_put(device_data->regulator);

	return 0;
}
/**
 * ux500_hash_shutdown - Function that shuts down the hash device.
 * @pdev: The platform device
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "%s: Hash still in use! Shutting down anyway...\n",
				__func__);
		/*
		 * (Allocate the device)
		 * current_ctx must be set to a non-NULL (dummy) value here,
		 * so the device can no longer be claimed by a context switch.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
			__func__);
}
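
/*
 * Note on the "dummy claim" above and in the suspend path below: current_ctx
 * is only ever tested against NULL to decide whether a device is allocated,
 * so incrementing a NULL pointer is (ab)used as a cheap way to mark the
 * device busy without having a real context to point at.
 */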
#ifdef CONFIG_PM_SLEEP
/**
 * ux500_hash_suspend - Function that suspends the hash device.
 * @dev: Device to suspend.
 */
static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * ++temp_ctx turns the NULL pointer into the same dummy value set
	 * above, so this tests whether we, and not a real context, hold the
	 * device.
	 */
	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "%s: down_interruptible() failed\n",
				__func__);
		ret = hash_disable_power(device_data, false);
	} else {
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(dev, "%s: hash_disable_power() failed\n", __func__);

	return ret;
}
/**
 * ux500_hash_resume - Function that resumes the hash device.
 * @dev: Device to resume.
 */
static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/* Release the device if suspend placed the dummy claim on it. */
	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);

	return ret;
}
#endif
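
/*
 * SIMPLE_DEV_PM_OPS only references the callbacks when CONFIG_PM_SLEEP is
 * set (SET_SYSTEM_SLEEP_PM_OPS expands to nothing otherwise), which is why
 * the suspend/resume implementations above can live under the #ifdef.
 */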
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);

static const struct of_device_id ux500_hash_match[] = {
	{ .compatible = "stericsson,ux500-hash" },
	{ },
};
MODULE_DEVICE_TABLE(of, ux500_hash_match);

static struct platform_driver hash_driver = {
	.probe = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver = {
		.name = "hash1",
		.of_match_table = ux500_hash_match,
		.pm = &ux500_hash_pm,
	}
};
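
/*
 * The driver binds either through the device tree ("stericsson,ux500-hash"
 * in the match table above) or, on non-DT platforms, by the platform device
 * name "hash1".
 */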
/**
 * ux500_hash_mod_init - The kernel module init function.
 */
static int __init ux500_hash_mod_init(void)
{
	klist_init(&driver_data.device_list, NULL, NULL);
	/* Initialize the semaphore to 0 devices (locked state) */
	sema_init(&driver_data.device_allocation, 0);

	return platform_driver_register(&hash_driver);
}

/**
 * ux500_hash_mod_fini - The kernel module exit function.
 */
static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}

module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("hmac-sha1-all");
MODULE_ALIAS_CRYPTO("hmac-sha256-all");