crypto_engine.c

/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

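/* Size limit passed to crypto_init_queue() for the engine's software request queue */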
#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        struct ahash_request *hreq;
        struct ablkcipher_request *breq;
        unsigned long flags;
        bool was_busy = false;
        int ret, rtype;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        pr_err("failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        engine->cur_req = async_req;
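        /* Let the owner of a backlogged request know that it is now in progress */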
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

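        /*
         * Track whether the engine was already busy: the hardware is only
         * prepared or unprepared on transitions between idle and busy.
         */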
        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
        /* At this point a request has been successfully dequeued for processing */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        pr_err("failed to prepare crypt hardware\n");
                        goto req_err;
                }
        }

        switch (rtype) {
        case CRYPTO_ALG_TYPE_AHASH:
                hreq = ahash_request_cast(engine->cur_req);
                if (engine->prepare_hash_request) {
                        ret = engine->prepare_hash_request(engine, hreq);
                        if (ret) {
                                pr_err("failed to prepare request: %d\n", ret);
                                goto req_err;
                        }
                        engine->cur_req_prepared = true;
                }
                ret = engine->hash_one_request(engine, hreq);
                if (ret) {
                        pr_err("failed to hash one request from queue\n");
                        goto req_err;
                }
                return;
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                breq = ablkcipher_request_cast(engine->cur_req);
                if (engine->prepare_cipher_request) {
                        ret = engine->prepare_cipher_request(engine, breq);
                        if (ret) {
                                pr_err("failed to prepare request: %d\n", ret);
                                goto req_err;
                        }
                        engine->cur_req_prepared = true;
                }
                ret = engine->cipher_one_request(engine, breq);
                if (ret) {
                        pr_err("failed to cipher one request from queue\n");
                        goto req_err;
                }
                return;
        default:
                pr_err("failed to prepare request of unknown type\n");
                return;
        }

req_err:
        switch (rtype) {
        case CRYPTO_ALG_TYPE_AHASH:
                hreq = ahash_request_cast(engine->cur_req);
                crypto_finalize_hash_request(engine, hreq, ret);
                break;
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                breq = ablkcipher_request_cast(engine->cur_req);
                crypto_finalize_cipher_request(engine, breq, ret);
                break;
        }
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, queue the pump work so the request is processed
 * when the engine is idle
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
                                   struct ablkcipher_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = ablkcipher_enqueue_request(&engine->queue, req);
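
        /*
         * Only kick the pump if the engine is idle; a busy engine requeues
         * the pump work itself when the current request completes.
         */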
        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);

/**
 * crypto_transfer_cipher_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
                                             struct ablkcipher_request *req)
{
        return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);

/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, queue the pump work so the request is processed
 * when the engine is idle
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
                                 struct ahash_request *req, bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = ahash_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);

/**
 * crypto_transfer_hash_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
                                    struct ablkcipher_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);
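        /*
         * Only the request currently owned by the engine needs the
         * unprepare callback and the cur_req bookkeeping below.
         */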
        if (engine->cur_req == &req->base)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                if (engine->cur_req_prepared &&
                    engine->unprepare_cipher_request) {
                        ret = engine->unprepare_cipher_request(engine, req);
                        if (ret)
                                pr_err("failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->base.complete(&req->base, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);

/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == &req->base)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                if (engine->cur_req_prepared &&
                    engine->unprepare_hash_request) {
                        ret = engine->unprepare_hash_request(engine, req);
                        if (ret)
                                pr_err("failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->base.complete(&req->base, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, wait
         * for a while so the pump thread can drain the queued requests.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                pr_warn("could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->cur_req_prepared = false;
        engine->priv_data = dev;
        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);
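
        /* Create the dedicated worker thread that runs the request pump */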
        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

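/*
 * Example: a minimal, hypothetical driver-side sketch of how an engine is
 * typically wired up; my_probe(), my_cipher_one_request() and struct my_dev
 * are illustrative placeholders, not part of this API:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd;
 *
 *		dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
 *		if (!dd)
 *			return -ENOMEM;
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *
 *		dd->engine->cipher_one_request = my_cipher_one_request;
 *		return crypto_engine_start(dd->engine);
 *	}
 */
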
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");