ccp-dev.c

/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <asm/cpu_device_id.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

static struct ccp_device *ccp_dev;

static inline struct ccp_device *ccp_get_device(void)
{
	return ccp_dev;
}

static inline void ccp_add_device(struct ccp_device *ccp)
{
	ccp_dev = ccp;
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
	ccp_dev = NULL;
}
/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case the return code will be -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp = ccp_get_device();
	unsigned long flags;
	unsigned int i;
	int ret;

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
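
/*
 * A minimal caller sketch for the API above, assuming a hypothetical
 * completion handler my_cmd_done(), an initialized struct completion
 * my_completion, and a struct ccp_cmd that the caller has already
 * filled in with its engine-specific request:
 *
 *	static void my_cmd_done(void *data, int err)
 *	{
 *		if (err == -EINPROGRESS)	// advanced out of the backlog
 *			return;
 *		// err now holds the result of the operation
 *		complete(data);
 *	}
 *
 *	cmd->flags = CCP_CMD_MAY_BACKLOG;
 *	cmd->callback = my_cmd_done;
 *	cmd->data = &my_completion;
 *	ret = ccp_enqueue_cmd(cmd);
 *	if ((ret != -EINPROGRESS) && (ret != -EBUSY))
 *		return ret;			// not queued
 *	wait_for_completion(&my_completion);
 */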
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}
static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}

static int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/*
	 * Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}
/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
	if (ccp == NULL) {
		dev_err(dev, "unable to allocate device struct\n");
		return NULL;
	}
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->ksb_mutex);
	ccp->ksb_count = KSB_COUNT;
	ccp->ksb_start = 0;

	return ccp;
}
/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
		ccp->ksb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		qim |= cmd_q->int_ok | cmd_q->int_err;

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the queues used to wait for KSB space and suspend */
	init_waitqueue_head(&ccp->ksb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "ccp-q%u", cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	/* Register the RNG */
	ccp->hwrng.name = "ccp-rng";
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret) {
		dev_err(dev, "error registering hwrng (%d)\n", ret);
		goto e_kthread;
	}

	/* Make the device struct available before enabling interrupts */
	ccp_add_device(ccp);

	/* Enable interrupts */
	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

	return 0;

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}
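
/*
 * A rough lifecycle sketch of how bus-specific code (e.g. a PCI front end)
 * is expected to drive ccp_alloc_struct()/ccp_init() above and
 * ccp_destroy() below. The my_get_irq()/my_free_irq() helpers and the
 * mapped_regs pointer are placeholders, not the real ccp-pci.c code:
 *
 *	ccp = ccp_alloc_struct(dev);		// at probe time
 *	if (!ccp)
 *		return -ENOMEM;
 *	ccp->io_regs = mapped_regs;		// MMIO base for the CCP
 *	ccp->get_irq = my_get_irq;		// called by ccp_init()
 *	ccp->free_irq = my_free_irq;		// called by ccp_destroy()
 *	dev_set_drvdata(dev, ccp);		// ccp_irq_handler() relies on this
 *	ret = ccp_init(ccp);
 *	if (ret)
 *		goto cleanup;
 *	...
 *	ccp_destroy(ccp);			// at remove time
 */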
/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int qim, i;

	/* Remove general access to the device struct */
	ccp_del_device(ccp);

	/* Unregister the RNG */
	hwrng_unregister(&ccp->hwrng);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	/* Build queue interrupt mask (two interrupt masks per queue) */
	qim = 0;
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		qim |= cmd_q->int_ok | cmd_q->int_err;
	}

	/* Disable and clear interrupts */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	ccp->free_irq(ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queue */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}
/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	return IRQ_HANDLED;
}
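
/*
 * Registration sketch: ccp_irq_handler() expects @data to be the struct
 * device pointer whose drvdata is the ccp_device, so the bus-specific
 * get_irq() callback would hook it up roughly like this (the IRQF_SHARED
 * flag and the "ccp" name are illustrative choices, not taken from this
 * file):
 *
 *	dev_set_drvdata(dev, ccp);
 *	ret = request_irq(irq, ccp_irq_handler, IRQF_SHARED, "ccp", dev);
 */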
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
#endif
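
/*
 * Suspend-flow sketch: ccp_queues_suspended() pairs with the
 * ccp->suspending flag tested in ccp_enqueue_cmd() and ccp_dequeue_cmd().
 * A bus-level suspend handler would roughly do the following (a hedged
 * outline, not the actual ccp-pci.c code; the flag update would be done
 * under ccp->cmd_lock):
 *
 *	ccp->suspending = 1;
 *	// wake every queue kthread so it notices the flag and parks itself
 *	for (i = 0; i < ccp->cmd_q_count; i++)
 *		wake_up_process(ccp->cmd_q[i].kthread);
 *	// wait until every queue has marked itself suspended
 *	wait_event_interruptible(ccp->suspend_queue,
 *				 ccp_queues_suspended(ccp));
 */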
static const struct x86_cpu_id ccp_support[] = {
	{ X86_VENDOR_AMD, 22, },
	{ },	/* terminating empty entry required by x86_match_cpu() */
};

static int __init ccp_mod_init(void)
{
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
	int ret;

	if (!x86_match_cpu(ccp_support))
		return -ENODEV;

	switch (cpuinfo->x86) {
	case 22:
		if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
			return -ENODEV;

		ret = ccp_pci_init();
		if (ret)
			return ret;

		/* Don't leave the driver loaded if init failed */
		if (!ccp_get_device()) {
			ccp_pci_exit();
			return -ENODEV;
		}

		return 0;
	}

	return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

	switch (cpuinfo->x86) {
	case 22:
		ccp_pci_exit();
		break;
	}
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);