ccp-dev.c

/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <asm/cpu_device_id.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

static struct ccp_device *ccp_dev;

static inline struct ccp_device *ccp_get_device(void)
{
        return ccp_dev;
}

static inline void ccp_add_device(struct ccp_device *ccp)
{
        ccp_dev = ccp;
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
        ccp_dev = NULL;
}

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
 * result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp = ccp_get_device();
        unsigned long flags;
        unsigned int i;
        int ret;

        if (!ccp)
                return -ENODEV;

        /* Caller must supply a callback routine */
        if (!cmd->callback)
                return -EINVAL;

        cmd->ccp = ccp;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        i = ccp->cmd_q_count;

        if (ccp->cmd_count >= MAX_CMD_QLEN) {
                ret = -EBUSY;
                if (cmd->flags & CCP_CMD_MAY_BACKLOG)
                        list_add_tail(&cmd->entry, &ccp->backlog);
        } else {
                ret = -EINPROGRESS;
                ccp->cmd_count++;
                list_add_tail(&cmd->entry, &ccp->cmd);

                /* Find an idle queue */
                if (!ccp->suspending) {
                        for (i = 0; i < ccp->cmd_q_count; i++) {
                                if (ccp->cmd_q[i].active)
                                        continue;

                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
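
/*
 * Illustrative only (not part of the original driver): a client of this
 * exported API would typically populate a struct ccp_cmd, supply a
 * completion callback, and submit the cmd roughly as sketched below.
 * The helper names (my_ccp_done, my_ctx) are hypothetical.
 *
 *      static void my_ccp_done(void *data, int err)
 *      {
 *              struct my_ctx *ctx = data;
 *
 *              if (err == -EINPROGRESS)
 *                      return;         (cmd advanced out of the backlog)
 *              ctx->err = err;         (final result of the operation)
 *              complete(&ctx->done);
 *      }
 *
 *      ...
 *      cmd->callback = my_ccp_done;
 *      cmd->data = ctx;
 *      cmd->flags |= CCP_CMD_MAY_BACKLOG;
 *      ret = ccp_enqueue_cmd(cmd);
 *      (queued if ret == -EINPROGRESS, or ret == -EBUSY with
 *       CCP_CMD_MAY_BACKLOG set; any other value is an immediate failure)
 */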

static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
        struct ccp_device *ccp = cmd->ccp;
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->cmd_count++;
        list_add_tail(&cmd->entry, &ccp->cmd);

        /* Find an idle queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                if (ccp->cmd_q[i].active)
                        continue;

                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        cmd_q->active = 0;

        if (ccp->suspending) {
                cmd_q->suspended = 1;

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);
                wake_up_interruptible(&ccp->suspend_queue);

                return NULL;
        }

        if (ccp->cmd_count) {
                cmd_q->active = 1;

                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);

                ccp->cmd_count--;
        }

        if (!list_empty(&ccp->backlog)) {
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);
                list_del(&backlog->entry);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        if (backlog) {
                INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
                schedule_work(&backlog->work);
        }

        return cmd;
}

static void ccp_do_cmd_complete(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);

        cmd->callback(cmd->data, cmd->ret);
}
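
/*
 * Descriptive note (added for clarity): each hardware command queue gets
 * one of these kthreads. The thread sleeps in TASK_INTERRUPTIBLE until
 * ccp_enqueue_cmd() or ccp_do_cmd_backlog() wakes it, pulls the next cmd
 * off the device-wide list via ccp_dequeue_cmd(), executes it, and then
 * defers the completion callback to a workqueue rather than invoking it
 * directly from the queue thread.
 */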
static int ccp_cmd_queue_thread(void *data)
{
        struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
        struct ccp_cmd *cmd;

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();

                set_current_state(TASK_INTERRUPTIBLE);

                cmd = ccp_dequeue_cmd(cmd_q);
                if (!cmd)
                        continue;

                __set_current_state(TASK_RUNNING);

                /* Execute the command */
                cmd->ret = ccp_run_cmd(cmd_q, cmd);

                /* Schedule the completion callback */
                INIT_WORK(&cmd->work, ccp_do_cmd_complete);
                schedule_work(&cmd->work);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}

static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
        u32 trng_value;
        int len = min_t(int, sizeof(trng_value), max);

        /*
         * Locking is provided by the caller so we can update device
         * hwrng-related fields safely
         */
        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
        if (!trng_value) {
                /* Zero is returned if no data is available or if a
                 * bad-entropy error is present. Assume an error if
                 * we exceed TRNG_RETRIES reads of zero.
                 */
                if (ccp->hwrng_retries++ > TRNG_RETRIES)
                        return -EIO;

                return 0;
        }

        /* Reset the counter and save the rng value */
        ccp->hwrng_retries = 0;
        memcpy(data, &trng_value, len);

        return len;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
        struct ccp_device *ccp;

        ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
        if (ccp == NULL) {
                dev_err(dev, "unable to allocate device struct\n");
                return NULL;
        }
        ccp->dev = dev;

        INIT_LIST_HEAD(&ccp->cmd);
        INIT_LIST_HEAD(&ccp->backlog);

        spin_lock_init(&ccp->cmd_lock);
        mutex_init(&ccp->req_mutex);
        mutex_init(&ccp->ksb_mutex);
        ccp->ksb_count = KSB_COUNT;
        ccp->ksb_start = 0;

        return ccp;
}

/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, qim, i;
        int ret;

        /* Find available queues */
        qim = 0;
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;

                /* Reserve 2 KSB regions for the queue */
                cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
                cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
                ccp->ksb_count -= 2;

                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
                                    (CMD_Q_STATUS_INCR * i);
                cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
                                        (CMD_Q_STATUS_INCR * i);
                cmd_q->int_ok = 1 << (i * 2);
                cmd_q->int_err = 1 << ((i * 2) + 1);

                cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

                init_waitqueue_head(&cmd_q->int_queue);

                /* Build queue interrupt mask (two interrupts per queue) */
                qim |= cmd_q->int_ok | cmd_q->int_err;

                dev_dbg(dev, "queue #%u available\n", i);
        }
        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }
        dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

        /* Disable and clear interrupts until ready */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Request an irq */
        ret = ccp->get_irq(ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }

        /* Initialize the queues used to wait for KSB space and suspend */
        init_waitqueue_head(&ccp->ksb_queue);
        init_waitqueue_head(&ccp->suspend_queue);

        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "ccp-q%u", cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        /* Register the RNG */
        ccp->hwrng.name = "ccp-rng";
        ccp->hwrng.read = ccp_trng_read;
        ret = hwrng_register(&ccp->hwrng);
        if (ret) {
                dev_err(dev, "error registering hwrng (%d)\n", ret);
                goto e_kthread;
        }

        /* Make the device struct available before enabling interrupts */
        ccp_add_device(ccp);

        /* Enable interrupts */
        iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

        return 0;

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        ccp->free_irq(ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}
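
/*
 * Note (added for clarity): ccp_init() expects the bus-specific probe code
 * (presumably ccp-pci.c) to have already called ccp_alloc_struct() and to
 * have filled in the fields used above -- ccp->io_regs, ccp->get_irq and
 * ccp->free_irq -- before this function runs. ccp_destroy() below is the
 * matching teardown.
 */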

/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int qim, i;

        /* Remove general access to the device struct */
        ccp_del_device(ccp);

        /* Unregister the RNG */
        hwrng_unregister(&ccp->hwrng);

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        /* Build queue interrupt mask (two interrupt masks per queue) */
        qim = 0;
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];
                qim |= cmd_q->int_ok | cmd_q->int_err;
        }

        /* Disable and clear interrupts */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

        ccp->free_irq(ccp);

        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        /* Flush the cmd and backlog queues */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
        struct device *dev = data;
        struct ccp_device *ccp = dev_get_drvdata(dev);
        struct ccp_cmd_queue *cmd_q;
        u32 q_int, status;
        unsigned int i;

        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
                if (q_int) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }

        return IRQ_HANDLED;
}
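
/*
 * Note (added for clarity): the per-queue status snapshot, cmd_error value
 * and int_rcvd flag recorded above, together with the
 * wake_up_interruptible() on int_queue, are presumably consumed by the
 * command execution path (ccp_run_cmd(), implemented outside this file),
 * which sleeps on int_queue until the hardware reports completion or an
 * error for its queue.
 */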

#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].suspended)
                        suspended++;

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return ccp->cmd_q_count == suspended;
}
#endif
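
/*
 * Illustrative only: a bus-level suspend handler (e.g. in ccp-pci.c) would
 * presumably pair ccp_queues_suspended() with the ccp->suspending flag and
 * ccp->suspend_queue used elsewhere in this file, along the lines of:
 *
 *      spin_lock_irqsave(&ccp->cmd_lock, flags);
 *      ccp->suspending = 1;
 *      (wake each cmd_q kthread so it notices the flag and parks itself)
 *      spin_unlock_irqrestore(&ccp->cmd_lock, flags);
 *
 *      wait_event_interruptible(ccp->suspend_queue,
 *                               ccp_queues_suspended(ccp));
 *
 * The exact sequence lives outside this file; this is only a sketch of how
 * the pieces shown here fit together.
 */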

static const struct x86_cpu_id ccp_support[] = {
        { X86_VENDOR_AMD, 22, },
};

static int __init ccp_mod_init(void)
{
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
        int ret;

        if (!x86_match_cpu(ccp_support))
                return -ENODEV;

        switch (cpuinfo->x86) {
        case 22:
                if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
                        return -ENODEV;

                ret = ccp_pci_init();
                if (ret)
                        return ret;

                /* Don't leave the driver loaded if init failed */
                if (!ccp_get_device()) {
                        ccp_pci_exit();
                        return -ENODEV;
                }

                return 0;
        }

        return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

        switch (cpuinfo->x86) {
        case 22:
                ccp_pci_exit();
                break;
        }
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);