ccp-crypto-main.c

/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);

/* For any tfm, requests for that tfm on the same CPU must be returned
 * in the order received.  With multiple queues available, the CCP can
 * process more than one cmd at a time.  Therefore we must maintain
 * a cmd list to ensure the proper ordering of requests on a given tfm/cpu
 * combination.
 */
struct ccp_crypto_cpu_queue {
        struct list_head cmds;
        struct list_head *backlog;
        unsigned int cmd_count;
};

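/* Maximum number of cmds that can sit in a per-cpu queue before further
 * submissions are backlogged (or rejected when backlogging is not allowed).
 */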
#define CCP_CRYPTO_MAX_QLEN	50

struct ccp_crypto_percpu_queue {
        struct ccp_crypto_cpu_queue __percpu *cpu_queue;
};
static struct ccp_crypto_percpu_queue req_queue;

struct ccp_crypto_cmd {
        struct list_head entry;

        struct ccp_cmd *cmd;

        /* Save the crypto_tfm and crypto_async_request addresses
         * separately to avoid any reference to a possibly invalid
         * crypto_async_request structure after invoking the request
         * callback
         */
        struct crypto_async_request *req;
        struct crypto_tfm *tfm;

        /* Used for held command processing to determine state */
        int ret;

        int cpu;
};

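/* Context used to run a cmd's completion processing as a work item on the
 * cpu the cmd was originally queued on.
 */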
struct ccp_crypto_cpu {
        struct work_struct work;
        struct completion completion;
        struct ccp_crypto_cmd *crypto_cmd;
        int err;
};

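/* Treat 0, -EINPROGRESS and -EBUSY as successful submission results;
 * anything else is a real error.
 */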
static inline bool ccp_crypto_success(int err)
{
        if (err && (err != -EINPROGRESS) && (err != -EBUSY))
                return false;

        return true;
}

/*
 * ccp_crypto_cmd_complete must be called while running on the appropriate
 * cpu and the caller must have done a get_cpu to disable preemption
 */
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
        struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
        struct ccp_crypto_cpu_queue *cpu_queue;
        struct ccp_crypto_cmd *held = NULL, *tmp;

        *backlog = NULL;

        cpu_queue = this_cpu_ptr(req_queue.cpu_queue);

        /* Held cmds will be after the current cmd in the queue so start
         * searching for a cmd with a matching tfm for submission.
         */
        tmp = crypto_cmd;
        list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                held = tmp;
                break;
        }

        /* Process the backlog:
         *   Because cmds can be executed from any point in the cmd list
         *   special precautions have to be taken when handling the backlog.
         */
        if (cpu_queue->backlog != &cpu_queue->cmds) {
                /* Skip over this cmd if it is the next backlog cmd */
                if (cpu_queue->backlog == &crypto_cmd->entry)
                        cpu_queue->backlog = crypto_cmd->entry.next;

                *backlog = container_of(cpu_queue->backlog,
                                        struct ccp_crypto_cmd, entry);
                cpu_queue->backlog = cpu_queue->backlog->next;

                /* Skip over this cmd if it is now the next backlog cmd */
                if (cpu_queue->backlog == &crypto_cmd->entry)
                        cpu_queue->backlog = crypto_cmd->entry.next;
        }

        /* Remove the cmd entry from the list of cmds */
        cpu_queue->cmd_count--;
        list_del(&crypto_cmd->entry);

        return held;
}

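/* Work handler executed on the cpu a cmd was queued on: removes the
 * completed cmd from the queue, invokes the crypto API completion
 * callbacks in order, notifies any backlogged cmd and submits the next
 * held cmd (one with the same tfm) to the CCP.
 */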
static void ccp_crypto_complete_on_cpu(struct work_struct *work)
{
        struct ccp_crypto_cpu *cpu_work =
                container_of(work, struct ccp_crypto_cpu, work);
        struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
        struct ccp_crypto_cmd *held, *next, *backlog;
        struct crypto_async_request *req = crypto_cmd->req;
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
        int cpu, ret;

        cpu = get_cpu();

        if (cpu_work->err == -EINPROGRESS) {
                /* Only propagate the -EINPROGRESS if necessary */
                if (crypto_cmd->ret == -EBUSY) {
                        crypto_cmd->ret = -EINPROGRESS;
                        req->complete(req, -EINPROGRESS);
                }

                goto e_cpu;
        }

        /* Operation has completed - update the queue before invoking
         * the completion callbacks and retrieve the next cmd (cmd with
         * a matching tfm) that can be submitted to the CCP.
         */
        held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
        if (backlog) {
                backlog->ret = -EINPROGRESS;
                backlog->req->complete(backlog->req, -EINPROGRESS);
        }

        /* Transition the state from -EBUSY to -EINPROGRESS first */
        if (crypto_cmd->ret == -EBUSY)
                req->complete(req, -EINPROGRESS);

        /* Completion callbacks */
        ret = cpu_work->err;
        if (ctx->complete)
                ret = ctx->complete(req, ret);
        req->complete(req, ret);

        /* Submit the next cmd */
        while (held) {
                ret = ccp_enqueue_cmd(held->cmd);
                if (ccp_crypto_success(ret))
                        break;

                /* Error occurred, report it and get the next entry */
                held->req->complete(held->req, ret);
                next = ccp_crypto_cmd_complete(held, &backlog);
                if (backlog) {
                        backlog->ret = -EINPROGRESS;
                        backlog->req->complete(backlog->req, -EINPROGRESS);
                }

                kfree(held);
                held = next;
        }

        kfree(crypto_cmd);

e_cpu:
        put_cpu();

        complete(&cpu_work->completion);
}

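/* Completion callback invoked by the CCP driver for a cmd.  The real work
 * is scheduled on the cpu the cmd was queued on and waited for, keeping
 * the callback itself synchronous.
 */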
static void ccp_crypto_complete(void *data, int err)
{
        struct ccp_crypto_cmd *crypto_cmd = data;
        struct ccp_crypto_cpu cpu_work;

        INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
        init_completion(&cpu_work.completion);
        cpu_work.crypto_cmd = crypto_cmd;
        cpu_work.err = err;

        schedule_work_on(crypto_cmd->cpu, &cpu_work.work);

        /* Keep the completion call synchronous */
        wait_for_completion(&cpu_work.completion);
}

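/* Queue a cmd on the current cpu.  The cmd is only submitted to the CCP
 * if no other cmd with the same tfm is already queued here; otherwise it
 * is held until that cmd completes.  If the queue is full the cmd is
 * either rejected (no CCP_CMD_MAY_BACKLOG) or queued as backlogged.
 */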
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
        struct ccp_crypto_cpu_queue *cpu_queue;
        struct ccp_crypto_cmd *active = NULL, *tmp;
        int cpu, ret;

        cpu = get_cpu();
        crypto_cmd->cpu = cpu;

        cpu_queue = this_cpu_ptr(req_queue.cpu_queue);

        /* Check if the cmd can/should be queued */
        if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
                        goto e_cpu;
        }

        /* Look for an entry with the same tfm.  If there is a cmd
         * with the same tfm in the list for this cpu then the current
         * cmd cannot be submitted to the CCP yet.
         */
        list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                active = tmp;
                break;
        }

        ret = -EINPROGRESS;
        if (!active) {
                ret = ccp_enqueue_cmd(crypto_cmd->cmd);
                if (!ccp_crypto_success(ret))
                        goto e_cpu;
        }

        if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (cpu_queue->backlog == &cpu_queue->cmds)
                        cpu_queue->backlog = &crypto_cmd->entry;
        }
        crypto_cmd->ret = ret;

        cpu_queue->cmd_count++;
        list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);

e_cpu:
        put_cpu();

        return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *                              by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
                               struct ccp_cmd *cmd)
{
        struct ccp_crypto_cmd *crypto_cmd;
        gfp_t gfp;
        int ret;

        gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

        crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
        if (!crypto_cmd)
                return -ENOMEM;

        /* The tfm pointer must be saved and not referenced from the
         * crypto_async_request (req) pointer because it is used after
         * the completion callback for the request and the req pointer
         * might not be valid anymore.
         */
        crypto_cmd->cmd = cmd;
        crypto_cmd->req = req;
        crypto_cmd->tfm = req->tfm;

        cmd->callback = ccp_crypto_complete;
        cmd->data = crypto_cmd;

        if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
                cmd->flags |= CCP_CMD_MAY_BACKLOG;
        else
                cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

        ret = ccp_crypto_enqueue_cmd(crypto_cmd);
        if (!ccp_crypto_success(ret))
                kfree(crypto_cmd);

        return ret;
}

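/*
 * Usage sketch (illustrative only - the function and context names below
 * are hypothetical and not part of this file): an algorithm implementation
 * builds a struct ccp_cmd in its request context and hands it, together
 * with the async request, to ccp_crypto_enqueue_request():
 *
 *	static int my_alg_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct my_req_ctx *rctx = ablkcipher_request_ctx(req);
 *
 *		... fill in rctx->cmd (engine, key, scatterlists) ...
 *
 *		return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
 *	}
 *
 * A return of -EINPROGRESS (or -EBUSY when backlogged) means the request
 * was accepted, matching the usual async crypto API conventions.
 */

/* Append the entries of sg_add to the first unused (page-less) slots of an
 * already allocated sg_table and return the last entry that was filled in.
 */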
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
                                            struct scatterlist *sg_add)
{
        struct scatterlist *sg, *sg_last = NULL;

        for (sg = table->sgl; sg; sg = sg_next(sg))
                if (!sg_page(sg))
                        break;
        BUG_ON(!sg);

        for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
                sg_set_page(sg, sg_page(sg_add), sg_add->length,
                            sg_add->offset);
                sg_last = sg;
        }
        BUG_ON(sg_add);

        return sg_last;
}

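/* Register the AES, AES-CMAC, AES-XTS and SHA implementations; each
 * registration call adds its algs to hash_algs or cipher_algs so they can
 * be torn down again in ccp_unregister_algs().
 */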
static int ccp_register_algs(void)
{
        int ret;

        ret = ccp_register_aes_algs(&cipher_algs);
        if (ret)
                return ret;

        ret = ccp_register_aes_cmac_algs(&hash_algs);
        if (ret)
                return ret;

        ret = ccp_register_aes_xts_algs(&cipher_algs);
        if (ret)
                return ret;

        ret = ccp_register_sha_algs(&hash_algs);
        if (ret)
                return ret;

        return 0;
}

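/* Unregister and free every hash and cipher alg that was added to the
 * module's lists by ccp_register_algs().
 */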
static void ccp_unregister_algs(void)
{
        struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
        struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;

        list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
                crypto_unregister_ahash(&ahash_alg->alg);
                list_del(&ahash_alg->entry);
                kfree(ahash_alg);
        }

        list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
                crypto_unregister_alg(&ablk_alg->alg);
                list_del(&ablk_alg->entry);
                kfree(ablk_alg);
        }
}

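/* Allocate the per-cpu cmd queues and initialize each one to an empty
 * cmd list with no backlog.
 */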
static int ccp_init_queues(void)
{
        struct ccp_crypto_cpu_queue *cpu_queue;
        int cpu;

        req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
        if (!req_queue.cpu_queue)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
                INIT_LIST_HEAD(&cpu_queue->cmds);
                cpu_queue->backlog = &cpu_queue->cmds;
                cpu_queue->cmd_count = 0;
        }

        return 0;
}

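/* Release the per-cpu cmd queues; every queue is expected to be empty by
 * the time the module is unloaded.
 */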
static void ccp_fini_queue(void)
{
        struct ccp_crypto_cpu_queue *cpu_queue;
        int cpu;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
                BUG_ON(!list_empty(&cpu_queue->cmds));
        }
        free_percpu(req_queue.cpu_queue);
}

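/* Module init: set up the per-cpu queues, then register the algorithms.
 * On a registration failure anything registered so far is removed again.
 */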
static int ccp_crypto_init(void)
{
        int ret;

        ret = ccp_init_queues();
        if (ret)
                return ret;

        ret = ccp_register_algs();
        if (ret) {
                ccp_unregister_algs();
                ccp_fini_queue();
        }

        return ret;
}

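/* Module exit: unregister all algorithms and free the per-cpu queues */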
static void ccp_crypto_exit(void)
{
        ccp_unregister_algs();
        ccp_fini_queue();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);