crypto_engine.c
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ahash_request *hreq;
	struct ablkcipher_request *breq;
	unsigned long flags;
	bool was_busy = false;
	int ret, rtype;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
	/* At this point a request has been dequeued and is ready to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		if (engine->prepare_hash_request) {
			ret = engine->prepare_hash_request(engine, hreq);
			if (ret) {
				dev_err(engine->dev, "failed to prepare request: %d\n",
					ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->hash_one_request(engine, hreq);
		if (ret) {
			dev_err(engine->dev, "failed to hash one request from queue\n");
			goto req_err;
		}
		return;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		if (engine->prepare_cipher_request) {
			ret = engine->prepare_cipher_request(engine, breq);
			if (ret) {
				dev_err(engine->dev, "failed to prepare request: %d\n",
					ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->cipher_one_request(engine, breq);
		if (ret) {
			dev_err(engine->dev, "failed to cipher one request from queue\n");
			goto req_err;
		}
		return;
	default:
		dev_err(engine->dev, "failed to prepare request of unknown type\n");
		return;
	}

req_err:
	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		crypto_finalize_hash_request(engine, hreq, ret);
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		crypto_finalize_cipher_request(engine, breq, ret);
		break;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}
/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: whether to kick the request pump if the engine is not busy
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
				   struct ablkcipher_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);

/**
 * crypto_transfer_cipher_request_to_engine - transfer one request into the
 * engine queue and let the engine pump it
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
					     struct ablkcipher_request *req)
{
	return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: whether to kick the request pump if the engine is not busy
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
				 struct ahash_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ahash_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);

/**
 * crypto_transfer_hash_request_to_engine - transfer one request into the
 * engine queue and let the engine pump it
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
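/*
 * Usage sketch (illustrative only, not part of the engine core): a driver's
 * ablkcipher .encrypt or ahash .digest handler typically just forwards the
 * request to the engine queue and returns the enqueue status (-EINPROGRESS,
 * or -EBUSY for a backlogged request) to the crypto API caller. The
 * "struct my_ctx" type and its "engine" member are hypothetical driver
 * state kept in the tfm context.
 *
 *	static int my_aes_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct my_ctx *ctx =
 *			crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
 *
 *		return crypto_transfer_cipher_request_to_engine(ctx->engine, req);
 *	}
 *
 *	static int my_sha_digest(struct ahash_request *req)
 *	{
 *		struct my_ctx *ctx =
 *			crypto_ahash_ctx(crypto_ahash_reqtfm(req));
 *
 *		return crypto_transfer_hash_request_to_engine(ctx->engine, req);
 *	}
 */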
/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
				    struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_cipher_request) {
			ret = engine->unprepare_cipher_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_hash_request) {
			ret = engine->unprepare_hash_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
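/*
 * Usage sketch (illustrative only, not part of the engine core): once the
 * hardware has finished a request handed over by the driver's
 * {cipher,hash}_one_request() callback, the driver's completion path (for
 * example a "done" tasklet) reports the result with the matching finalize
 * helper above; that unprepares the request, clears engine->cur_req,
 * completes the request and kicks the pump again. "struct my_dev" and its
 * members are hypothetical driver state.
 *
 *	static void my_cipher_done(struct my_dev *dd, int err)
 *	{
 *		struct ablkcipher_request *req = dd->cur_cipher_req;
 *
 *		crypto_finalize_cipher_request(dd->engine, req, err);
 *	}
 */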
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need
	 * to wait for a while to pump the requests off the engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from a context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
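/*
 * Usage sketch (illustrative only, not part of the engine core): at probe
 * time a driver allocates the engine, fills in the callbacks it supports
 * from <crypto/engine.h> (the prepare/unprepare hooks are optional, the
 * *_one_request hooks do the actual work) and starts the request pump.
 * All "my_*" names are hypothetical.
 *
 *	dd->engine = crypto_engine_alloc_init(dev, true);
 *	if (!dd->engine)
 *		return -ENOMEM;
 *
 *	dd->engine->prepare_cipher_request = my_prepare_cipher_request;
 *	dd->engine->unprepare_cipher_request = my_unprepare_cipher_request;
 *	dd->engine->cipher_one_request = my_cipher_one_request;
 *
 *	ret = crypto_engine_start(dd->engine);
 *	if (ret) {
 *		crypto_engine_exit(dd->engine);
 *		return ret;
 *	}
 *
 * On remove, crypto_engine_exit() below stops the engine and destroys its
 * kthread worker; the engine structure itself is devm-allocated and is
 * freed together with the device.
 */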
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");