/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>
#include <asm/pnv-pci.h>
#include <linux/msi.h>

#include "cxl.h"
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
	struct address_space *mapping;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int rc;

	afu = cxl_pci_to_afu(dev);
	if (IS_ERR(afu))
		return ERR_CAST(afu);

	ctx = cxl_context_alloc();
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		goto err_dev;
	}

	ctx->kernelapi = true;

	/*
	 * Make our own address space since we won't have one from the
	 * filesystem like the user api has, and even if we do associate a file
	 * with this context we don't want to use the global anonymous inode's
	 * address space as that can invalidate unrelated users:
	 */
	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
	if (!mapping) {
		rc = -ENOMEM;
		goto err_ctx;
	}
	address_space_init_once(mapping);

	/* Make it a slave context.  We can promote it later? */
	rc = cxl_context_init(ctx, afu, false, mapping);
	if (rc)
		goto err_mapping;

	return ctx;

err_mapping:
	kfree(mapping);
err_ctx:
	kfree(ctx);
err_dev:
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
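
/*
 * A minimal usage sketch for a kernel AFU driver (names illustrative,
 * error handling trimmed); contexts from cxl_dev_context_init() are
 * released with cxl_release_context() below:
 *
 *	struct cxl_context *ctx = cxl_dev_context_init(pdev);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	cxl_release_context(ctx);
 */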
struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
	return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

int cxl_release_context(struct cxl_context *ctx)
{
	if (ctx->status >= STARTED)
		return -EBUSY;

	cxl_context_free(ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);
/*
 * Translate the nth AFU interrupt of a context into its hardware irq
 * number by walking the allocated irq ranges.  Returns 0 if num is out
 * of range.
 */
static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
	__u16 range;
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		range = ctx->irqs.range[r];
		if (num < range) {
			return ctx->irqs.offset[r] + num;
		}
		num -= range;
	}
	return 0;
}
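
/*
 * Worked example (illustrative numbers): with ranges
 * {offset = 0x100, range = 4} and {offset = 0x200, range = 2} and the
 * remaining ranges empty, num = 5 passes the first range (5 - 4 = 1)
 * and maps to 0x200 + 1 = 0x201, while num = 6 falls off the end and
 * returns 0.
 */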
int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
{
	if (*ctx == NULL || *afu_irq == 0) {
		*afu_irq = 1;
		*ctx = cxl_get_context(pdev);
	} else {
		(*afu_irq)++;
		if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
			*ctx = list_next_entry(*ctx, extra_irq_contexts);
			*afu_irq = 1;
		}
	}
	return cxl_find_afu_irq(*ctx, *afu_irq);
}
/* Exported via cxl_base */

int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
	if (!ctx)
		return -EINVAL;

	ctx->priv = priv;

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_priv);

void *cxl_get_priv(struct cxl_context *ctx)
{
	if (!ctx)
		return ERR_PTR(-EINVAL);

	return ctx->priv;
}
EXPORT_SYMBOL_GPL(cxl_get_priv);
int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	int res;
	irq_hw_number_t hwirq;

	if (num == 0)
		num = ctx->afu->pp_irqs;
	res = afu_allocate_irqs(ctx, num);
	if (res)
		return res;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		/*
		 * In a guest, the PSL interrupt is not multiplexed. It was
		 * allocated above, and we need to set its handler.
		 */
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq)
			cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
	}

	if (ctx->status == STARTED) {
		if (cxl_ops->update_ivtes)
			cxl_ops->update_ivtes(ctx);
		else
			WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
	}

	return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, ctx);
		}
	}
	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
	irq_hw_number_t hwirq;

	/*
	 * Find interrupt we are to register.
	 */
	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return -ENOENT;

	return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return;

	virq = irq_find_mapping(NULL, hwirq);
	if (virq)
		cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);
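
/*
 * A minimal sketch of the AFU interrupt helpers (my_handler/my_data are
 * illustrative; AFU interrupts are numbered from 1, with 0 being the
 * PSL interrupt in a guest):
 *
 *	rc = cxl_allocate_afu_irqs(ctx, 4);
 *	rc = cxl_map_afu_irq(ctx, 1, my_handler, my_data, "my-afu-irq");
 *	...
 *	cxl_unmap_afu_irq(ctx, 1, my_data);
 *	cxl_free_afu_irqs(ctx);
 */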
/*
 * Start a context
 * Code here similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already started */

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
		kernel = false;
		ctx->real_mode = false;
	}

	cxl_ctx_get();

	if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
		/* Drop both pid references taken above, not just ctx->pid */
		put_pid(ctx->glpid);
		put_pid(ctx->pid);
		ctx->glpid = ctx->pid = NULL;
		cxl_ctx_put();
		goto out;
	}

	ctx->status = STARTED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);
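
/*
 * Sketch of a purely in-kernel start/stop cycle (task == NULL marks a
 * kernel context; MY_WED is a hypothetical work element descriptor):
 *
 *	rc = cxl_start_context(ctx, MY_WED, NULL);
 *	if (rc)
 *		goto err;
 *	...
 *	WARN_ON(cxl_stop_context(ctx));
 */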
int cxl_process_element(struct cxl_context *ctx)
{
	return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);

/* Stop a context.  Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
	return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);

void cxl_set_master(struct cxl_context *ctx)
{
	ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);
int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode)
{
	if (ctx->status == STARTED) {
		/*
		 * We could potentially update the PE and issue an update LLCMD
		 * to support this, but it doesn't seem to have a good use case
		 * since it's trivial to just create a second kernel context
		 * with different translation modes, so until someone convinces
		 * me otherwise:
		 */
		return -EBUSY;
	}

	ctx->real_mode = real_mode;
	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_translation_mode);
/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
	return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
	return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
		    loff_t *off)
{
	return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);
#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd)
{
	struct file *file;
	int rc, flags, fdtmp;

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (rc < 0)
		return ERR_PTR(rc);
	fdtmp = rc;

	/*
	 * Patch the file ops.  Needs to be careful that this is reentrant safe.
	 */
	if (fops) {
		PATCH_FOPS(open);
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(unlocked_ioctl);
		PATCH_FOPS(compat_ioctl);
		PATCH_FOPS(mmap);
	} else /* use default ops */
		fops = (struct file_operations *)&afu_fops;

	file = anon_inode_getfile("cxl", fops, ctx, flags);
	if (IS_ERR(file))
		goto err_fd;

	file->f_mapping = ctx->mapping;

	*fd = fdtmp;
	return file;

err_fd:
	put_unused_fd(fdtmp);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);
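
/*
 * Typical pattern (my_fops is hypothetical): grab the file and fd here,
 * then publish the fd only once setup can no longer fail.  Note the
 * mixed error convention above: ERR_PTR on fd exhaustion, NULL when
 * anon_inode_getfile() fails.
 *
 *	struct file *file;
 *	int fd;
 *
 *	file = cxl_get_fd(ctx, &my_fops, &fd);
 *	if (IS_ERR_OR_NULL(file))
 *		goto err;
 *	...
 *	fd_install(fd, file);
 */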
struct cxl_context *cxl_fops_get_context(struct file *file)
{
	return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

void cxl_set_driver_ops(struct cxl_context *ctx,
			struct cxl_afu_driver_ops *ops)
{
	WARN_ON(!ops->fetch_event || !ops->event_delivered);
	atomic_set(&ctx->afu_driver_events, 0);
	ctx->afu_driver_ops = ops;
}
EXPORT_SYMBOL_GPL(cxl_set_driver_ops);

void cxl_context_events_pending(struct cxl_context *ctx,
				unsigned int new_events)
{
	atomic_add(new_events, &ctx->afu_driver_events);
	wake_up_all(&ctx->wq);
}
EXPORT_SYMBOL_GPL(cxl_context_events_pending);
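
/*
 * An AFU driver wires these up before handing out a context fd; the ops
 * struct and callbacks here are illustrative only:
 *
 *	static struct cxl_afu_driver_ops my_driver_ops = {
 *		.fetch_event	 = my_fetch_event,
 *		.event_delivered = my_event_delivered,
 *	};
 *
 *	cxl_set_driver_ops(ctx, &my_driver_ops);
 *	...
 *	cxl_context_events_pending(ctx, 1);
 *
 * The last call wakes anyone blocked in read/poll on the context fd.
 */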
int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work)
{
	int rc;

	/* code taken from afu_ioctl_start_work */
	if (!(work->flags & CXL_START_WORK_NUM_IRQS))
		work->num_interrupts = ctx->afu->pp_irqs;
	else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
		 (work->num_interrupts > ctx->afu->irqs_max)) {
		return -EINVAL;
	}

	rc = afu_register_irqs(ctx, work->num_interrupts);
	if (rc)
		return rc;

	rc = cxl_start_context(ctx, work->work_element_descriptor, current);
	if (rc < 0) {
		afu_release_irqs(ctx, ctx);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);
void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
	if (ctx->status != STARTED)
		return NULL;

	pr_devel("%s: psn_phys: %llx size: %llx\n",
		 __func__, ctx->psn_phys, ctx->psn_size);
	return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

void cxl_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);
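
/*
 * Sketch of problem state area access (MY_REG is hypothetical and
 * device-specific); the context must already be STARTED or the map
 * returns NULL:
 *
 *	void __iomem *psa = cxl_psa_map(ctx);
 *
 *	if (psa) {
 *		u64 val = in_be64(psa + MY_REG);
 *		...
 *		cxl_psa_unmap(psa);
 *	}
 */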
int cxl_afu_reset(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = cxl_ops->afu_reset(afu);
	if (rc)
		return rc;

	return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
				  bool perst_reloads_same_image)
{
	afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);

	if (IS_ERR(afu))
		return -ENODEV;

	return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);

int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);

	if (IS_ERR(afu))
		return -ENODEV;

	if (irqs > afu->adapter->user_irqs)
		return -EINVAL;

	/* Limit user_irqs to prevent the user increasing this via sysfs */
	afu->adapter->user_irqs = irqs;
	afu->irqs_max = irqs;

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process);

int cxl_get_max_irqs_per_process(struct pci_dev *dev)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);

	if (IS_ERR(afu))
		return -ENODEV;

	return afu->irqs_max;
}
EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process);
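
/*
 * Illustrative only: a driver whose hardware can address at most 128
 * AFU interrupt numbers per context would declare that limit up front,
 * and _cxl_cx4_setup_msi_irqs() below spreads any excess MSIs across
 * extra contexts:
 *
 *	rc = cxl_set_max_irqs_per_process(pdev, 128);
 */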
/*
 * This is a special interrupt allocation routine called from the PHB's MSI
 * setup function. When capi interrupts are allocated in this manner they must
 * still be associated with a running context, but since the MSI APIs have no
 * way to specify this we use the default context associated with the device.
 *
 * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU
 * interrupt number, so in order to overcome this their driver informs us of
 * the restriction by setting the maximum interrupts per context, and we
 * allocate additional contexts as necessary so that we can keep the AFU
 * interrupt number within the supported range.
 */
int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct cxl_context *ctx, *new_ctx, *default_ctx;
	int remaining;
	int rc;

	ctx = default_ctx = cxl_get_context(pdev);
	if (WARN_ON(!default_ctx))
		return -ENODEV;

	remaining = nvec;
	while (remaining > 0) {
		rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max));
		if (rc) {
			pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev));
			return rc;
		}
		remaining -= ctx->afu->irqs_max;

		if (ctx != default_ctx && default_ctx->status == STARTED) {
			WARN_ON(cxl_start_context(ctx,
				be64_to_cpu(default_ctx->elem->common.wed),
				NULL));
		}

		if (remaining > 0) {
			new_ctx = cxl_dev_context_init(pdev);
			/* cxl_dev_context_init() returns ERR_PTR, never NULL */
			if (IS_ERR(new_ctx)) {
				pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
				return -ENOSPC;
			}
			list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts);
			ctx = new_ctx;
		}
	}

	return 0;
}
/* Exported via cxl_base */

void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct cxl_context *ctx, *pos, *tmp;

	ctx = cxl_get_context(pdev);
	if (WARN_ON(!ctx))
		return;

	cxl_free_afu_irqs(ctx);
	list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) {
		cxl_stop_context(pos);
		cxl_free_afu_irqs(pos);
		list_del(&pos->extra_irq_contexts);
		cxl_release_context(pos);
	}
}
/* Exported via cxl_base */