/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];
	uint8_t			sha_digest[SHA256_DIGEST_SIZE];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

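/*
 * Driver-private state: MMIO base and capability word read at probe time,
 * the coherent bounce-buffer block above, and per-channel completion,
 * mutex, request queue and worker-thread bookkeeping.
 */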
struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	struct mutex			mutex[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
};

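/*
 * Static assignment of DCP hardware channels: channel 0 is dedicated to
 * SHA hashing and channel 2 to AES, each serviced by its own kthread.
 */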
enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	unsigned int			enc:1;
	unsigned int			ecb:1;
	struct crypto_ablkcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

/*
 * Due to the design of the Linux Crypto API, there can only ever be one
 * instance of the MXS DCP driver active at a time.
 */
static struct dcp *global_sdcp;
static DEFINE_MUTEX(global_mutex);

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

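/*
 * Hand a single DMA descriptor to the channel assigned to @actx and wait
 * for the interrupt handler to signal completion: clear the channel status,
 * load the descriptor address into CMDPTR, then increment the channel
 * semaphore to kick off the transfer.
 */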
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	int ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		ret = -ETIMEDOUT;
		goto out_unmap;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		ret = -EINVAL;
		goto out_unmap;
	}

	ret = 0;

out_unmap:
	/* Unmap the descriptor on all exit paths, including errors. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return ret;
}

/*
 * Encryption (AES128)
 */
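/*
 * Program and run one AES-128 descriptor: the payload points at the key
 * blob in the coherent block (the 128-bit key followed by the CBC IV slot),
 * source and destination point at the bounce buffers, and CIPHER_INIT is
 * set only for the first block of a CBC operation.
 */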
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (actx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (actx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}

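/*
 * Walk the source scatterlist, copying data into the input bounce buffer in
 * DCP_BUF_SZ chunks. Whenever the buffer fills up (or the last scatterlist
 * entry is reached) the engine is run and the output bounce buffer is copied
 * back into the destination scatterlist; 'split' tracks a destination entry
 * that is only partially filled between iterations.
 */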
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0;
	int init = 0;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!actx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src)) {
				ret = mxs_dcp_run_aes(actx, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}

					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);
	}

	return ret;
}

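/*
 * Per-channel worker thread for the AES channel: it sleeps until woken by
 * mxs_dcp_aes_enqueue(), then drains the crypto queue, running each request
 * and completing it with the result.
 */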
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int ret;

	ablkcipher_request_set_tfm(req, ctx->fallback);

	if (enc)
		ret = crypto_ablkcipher_encrypt(req);
	else
		ret = crypto_ablkcipher_decrypt(req);

	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

	return ret;
}

static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	actx->enc = enc;
	actx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	mutex_lock(&sdcp->mutex[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return -EINPROGRESS;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	unsigned int ret;

	/*
	 * AES-128 is supported by the hardware, so just store the key in the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/* Check if the key size is supported by kernel at all. */
	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * software fallback.
	 */
	actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	actx->fallback->base.crt_flags |=
		tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;

	ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |=
		actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;

	return ret;
}

static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *blk;

	blk = crypto_alloc_ablkcipher(name, 0, flags);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_async_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(actx->fallback);
	actx->fallback = NULL;
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = dma_map_single(sdcp->dev,
						sdcp->coh->sha_digest,
						SHA256_DIGEST_SIZE,
						DMA_FROM_DEVICE);

	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	dma_unmap_single(sdcp->dev, digest_phys, SHA256_DIGEST_SIZE,
			 DMA_FROM_DEVICE);
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

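/*
 * Copy the request's scatterlist data into the SHA input bounce buffer,
 * submitting a descriptor each time the buffer fills. On the final request
 * of a hash session the remaining data is submitted with HASH_TERM set and
 * the digest, which the engine produces byte-reversed, is copied back into
 * req->result in the expected order.
 */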
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *digest = sdcp->coh->sha_digest;
	uint8_t *in_buf = sdcp->coh->sha_in_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		ret = mxs_dcp_run_sha(req);
		if (ret || !req->result)
			return ret;
		actx->fill = 0;

		/* For some reason, the result is flipped. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = digest[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret, fini;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);
			fini = rctx->fini;
			arq->complete(arq, ret);
			if (!fini)
				continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

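/*
 * Common helper for update/final/finup: the first update of a session (the
 * 'hot' flag is still clear) marks the request with 'init' so HASH_INIT is
 * set on the first descriptor, then the request is queued for the SHA
 * channel thread and the caller gets -EINPROGRESS.
 */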
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	mutex_lock(&sdcp->mutex[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return -EINPROGRESS;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

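/*
 * Both DCP interrupt lines are wired to this handler (see probe). Each set
 * bit in the masked STAT register corresponds to a channel whose descriptor
 * finished, so the matching completion is signalled after the interrupt is
 * acknowledged.
 */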
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	mutex_lock(&global_mutex);
	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		ret = -ENODEV;
		goto err_mutex;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_vmi_irq < 0 || dcp_irq < 0) {
		ret = -EINVAL;
		goto err_mutex;
	}

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp) {
		ret = -ENOMEM;
		goto err_mutex;
	}

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base)) {
		ret = PTR_ERR(sdcp->base);
		goto err_mutex;
	}

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		goto err_mutex;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		goto err_mutex;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = kzalloc(sizeof(struct dcp_coherent_block), GFP_KERNEL);
	if (!sdcp->coh) {
		dev_err(dev, "Error allocating coherent block\n");
		ret = -ENOMEM;
		goto err_mutex;
	}

	/* Restart the DCP block. */
	stmp_reset_block(sdcp->base);

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);

	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		mutex_init(&sdcp->mutex[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_free_coherent;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	/* Success: release the global lock taken at the top of probe. */
	mutex_unlock(&global_mutex);

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_free_coherent:
	kfree(sdcp->coh);

err_mutex:
	mutex_unlock(&global_mutex);
	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	/* Free the coherent block only after the worker threads are gone. */
	kfree(sdcp->coh);

	platform_set_drvdata(pdev, NULL);

	mutex_lock(&global_mutex);
	global_sdcp = NULL;
	mutex_unlock(&global_mutex);

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.owner		= THIS_MODULE,
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");