crypto_engine.c

/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

void crypto_finalize_request(struct crypto_engine *engine,
                             struct ablkcipher_request *req, int err);

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        struct ablkcipher_request *req;
        unsigned long flags;
        bool was_busy = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                queue_kthread_work(&engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        queue_kthread_work(&engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        pr_err("failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        req = ablkcipher_request_cast(async_req);
        engine->cur_req = req;
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* At this point we have successfully dequeued a request to encrypt */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        pr_err("failed to prepare crypt hardware\n");
                        goto req_err;
                }
        }

        if (engine->prepare_request) {
                ret = engine->prepare_request(engine, engine->cur_req);
                if (ret) {
                        pr_err("failed to prepare request: %d\n", ret);
                        goto req_err;
                }
                engine->cur_req_prepared = true;
        }

        ret = engine->crypt_one_request(engine, engine->cur_req);
        if (ret) {
                pr_err("failed to crypt one request from queue\n");
                goto req_err;
        }

        return;

req_err:
        crypto_finalize_request(engine, engine->cur_req, ret);
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);
}

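/*
 * A note on the engine states (summarizing the logic above, not taken
 * from the original sources): "busy" means a request is in flight on the
 * hardware, while "idling" marks the window in which the teardown path
 * has dropped queue_lock to call unprepare_crypt_hardware(). Any context
 * that observes "idling" simply requeues the pump work and backs off, so
 * the hardware is never re-prepared while it is being torn down.
 */
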
static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * @need_pump: true to kick the request pump after enqueueing
 */
int crypto_transfer_request(struct crypto_engine *engine,
                            struct ablkcipher_request *req, bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = ablkcipher_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                queue_kthread_work(&engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_request);

/**
 * crypto_transfer_request_to_engine - transfer one request to the engine
 * queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                      struct ablkcipher_request *req)
{
        return crypto_transfer_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);

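/*
 * Example (a hedged sketch, not from the original file; the foo_* names
 * are hypothetical): a driver's ablkcipher .encrypt callback would
 * typically just hand the request over to the engine and return the
 * enqueue status:
 *
 *      static int foo_aes_encrypt(struct ablkcipher_request *req)
 *      {
 *              struct foo_dev *dd = foo_find_dev();   (hypothetical lookup)
 *
 *              return crypto_transfer_request_to_engine(dd->engine, req);
 *      }
 *
 * The return value is what ablkcipher_enqueue_request() reported, usually
 * -EINPROGRESS, or -EBUSY for a request placed on the backlog.
 */
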
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_request(struct crypto_engine *engine,
                             struct ablkcipher_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == req)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                if (engine->cur_req_prepared && engine->unprepare_request) {
                        ret = engine->unprepare_request(engine, req);
                        if (ret)
                                pr_err("failed to unprepare request\n");
                }

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->base.complete(&req->base, err);

        queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_request);

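/*
 * Example (a sketch under assumed driver names): the driver calls this
 * from whatever context learns that the hardware is done, typically the
 * interrupt handler or a tasklet scheduled from it:
 *
 *      static irqreturn_t foo_crypt_irq(int irq, void *data)
 *      {
 *              struct foo_dev *dd = data;
 *              int err = foo_read_status(dd);   (hypothetical status read)
 *
 *              crypto_finalize_request(dd->engine, dd->req, err);
 *              return IRQ_HANDLED;
 *      }
 *
 * Finalizing also requeues the pump, so the next queued request is picked
 * up without further driver involvement.
 */
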
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        queue_kthread_work(&engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, we need
         * to wait a while for the pump to drain the queued requests.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                pr_warn("could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

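/*
 * Example (assumed suspend/resume hooks, not from this file): stopping
 * drains the queue before the device powers down, and starting kicks the
 * pump again afterwards. Note that crypto_engine_stop() may sleep:
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              struct foo_dev *dd = dev_get_drvdata(dev);
 *
 *              return crypto_engine_stop(dd->engine);
 *      }
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              struct foo_dev *dd = dev_get_drvdata(dev);
 *
 *              return crypto_engine_start(dd->engine);
 *      }
 */
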
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->cur_req_prepared = false;
        engine->priv_data = dev;
        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);

        init_kthread_worker(&engine->kworker);
        engine->kworker_task = kthread_run(kthread_worker_fn,
                                           &engine->kworker, "%s",
                                           engine->name);
        if (IS_ERR(engine->kworker_task)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        init_kthread_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

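/*
 * Example (a minimal probe-time sketch; the foo_* callbacks are
 * hypothetical driver functions): allocate the engine, hook up the
 * callbacks that crypto_pump_requests() will invoke, then start it:
 *
 *      dd->engine = crypto_engine_alloc_init(dev, true);
 *      if (!dd->engine)
 *              return -ENOMEM;
 *
 *      dd->engine->prepare_crypt_hardware = foo_prepare_hw;
 *      dd->engine->unprepare_crypt_hardware = foo_unprepare_hw;
 *      dd->engine->prepare_request = foo_prepare_req;
 *      dd->engine->unprepare_request = foo_unprepare_req;
 *      dd->engine->crypt_one_request = foo_crypt_one_req;
 *
 *      ret = crypto_engine_start(dd->engine);
 *      if (ret)
 *              return ret;
 */
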
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        flush_kthread_worker(&engine->kworker);
        kthread_stop(engine->kworker_task);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

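/*
 * Example (assumed remove path): the mirror of the probe sketch above is
 * a single call that stops the engine and tears down the kworker; the
 * engine structure itself is devm-allocated and freed with the device:
 *
 *      static int foo_remove(struct platform_device *pdev)
 *      {
 *              struct foo_dev *dd = platform_get_drvdata(pdev);
 *
 *              return crypto_engine_exit(dd->engine);
 *      }
 */
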
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");