link.c

// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/mmu_context.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <misc/ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"

#define SPA_PASID_BITS		15
#define SPA_PASID_MAX		((1 << SPA_PASID_BITS) - 1)
#define SPA_PE_MASK		SPA_PASID_MAX
#define SPA_SPA_SIZE_LOG	22 /* Each SPA is 4 MB */

#define SPA_CFG_SF		(1ull << (63-0))
#define SPA_CFG_TA		(1ull << (63-1))
#define SPA_CFG_HV		(1ull << (63-3))
#define SPA_CFG_UV		(1ull << (63-4))
#define SPA_CFG_XLAT_hpt	(0ull << (63-6)) /* Hashed page table (HPT) mode */
#define SPA_CFG_XLAT_roh	(2ull << (63-6)) /* Radix on HPT mode */
#define SPA_CFG_XLAT_ror	(3ull << (63-6)) /* Radix on Radix mode */
#define SPA_CFG_PR		(1ull << (63-49))
#define SPA_CFG_TC		(1ull << (63-54))
#define SPA_CFG_DR		(1ull << (63-59))

#define SPA_XSL_TF		(1ull << (63-3))  /* Translation fault */
#define SPA_XSL_S		(1ull << (63-38)) /* Store operation */

#define SPA_PE_VALID		0x80000000

struct pe_data {
	struct mm_struct *mm;
	/* callback to trigger when a translation fault occurs */
	void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr);
	/* opaque pointer to be passed to the above callback */
	void *xsl_err_data;
	struct rcu_head rcu;
};

struct spa {
	struct ocxl_process_element *spa_mem;
	int spa_order;
	struct mutex spa_lock;
	struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
	char *irq_name;
	int virq;
	void __iomem *reg_dsisr;
	void __iomem *reg_dar;
	void __iomem *reg_tfc;
	void __iomem *reg_pe_handle;
	/*
	 * The following fields are used by the memory fault
	 * interrupt handler. We can only have one interrupt at a
	 * time. The NPU won't raise another interrupt until the
	 * previous one has been ack'd by writing to the TFC register
	 */
	struct xsl_fault {
		struct work_struct fault_work;
		u64 pe;
		u64 dsisr;
		u64 dar;
		struct pe_data pe_data;
	} xsl_fault;
};

/*
 * An opencapi link can be used by several PCI functions. We have
 * one link per device slot.
 *
 * A linked list of opencapi links should suffice, as there's a
 * limited number of opencapi slots on a system and lookup is only
 * done when the device is probed
 */
struct link {
	struct list_head list;
	struct kref ref;
	int domain;
	int bus;
	int dev;
	atomic_t irq_available;
	struct spa *spa;
	void *platform_data;
};

static struct list_head links_list = LIST_HEAD_INIT(links_list);
static DEFINE_MUTEX(links_list_lock);

enum xsl_response {
	CONTINUE,
	ADDRESS_ERROR,
	RESTART,
};
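
/*
 * Read the fault information latched by the XSL: fault status (DSISR),
 * faulting address (DAR) and the handle of the faulting process element.
 */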
static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
{
	u64 reg;

	*dsisr = in_be64(spa->reg_dsisr);
	*dar = in_be64(spa->reg_dar);
	reg = in_be64(spa->reg_pe_handle);
	*pe = reg & SPA_PE_MASK;
}
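
/*
 * Acknowledge the fault by writing to the TFC register, telling the
 * XSL either to restart the translation (fault resolved) or to report
 * an address error to the AFU. CONTINUE is not supported.
 */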
static void ack_irq(struct spa *spa, enum xsl_response r)
{
	u64 reg = 0;

	/* continue is not supported */
	if (r == RESTART)
		reg = PPC_BIT(31);
	else if (r == ADDRESS_ERROR)
		reg = PPC_BIT(30);
	else
		WARN(1, "Invalid irq response %d\n", r);

	if (reg) {
		trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,
				spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg);
		out_be64(spa->reg_tfc, reg);
	}
}
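
/*
 * Bottom half of the translation fault handler. Resolves the fault in
 * the context of the target mm via copro_handle_mm_fault(), preloads
 * the hash entry when running in HPT mode, then acknowledges the
 * interrupt. Releases the mm reference taken by the top half.
 */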
static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
	unsigned int flt = 0;
	unsigned long access, flags, inv_flags = 0;
	enum xsl_response r;
	struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
					fault_work);
	struct spa *spa = container_of(fault, struct spa, xsl_fault);
	int rc;

	/*
	 * We need to release a reference on the mm whenever exiting this
	 * function (taken in the memory fault interrupt handler)
	 */
	rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
				&flt);
	if (rc) {
		pr_debug("copro_handle_mm_fault failed: %d\n", rc);
		if (fault->pe_data.xsl_err_cb) {
			fault->pe_data.xsl_err_cb(
				fault->pe_data.xsl_err_data,
				fault->dar, fault->dsisr);
		}
		r = ADDRESS_ERROR;
		goto ack;
	}

	if (!radix_enabled()) {
		/*
		 * update_mmu_cache() will not have loaded the hash
		 * since current->trap is not a 0x400 or 0x300, so
		 * just call hash_page_mm() here.
		 */
		access = _PAGE_PRESENT | _PAGE_READ;
		if (fault->dsisr & SPA_XSL_S)
			access |= _PAGE_WRITE;
		if (REGION_ID(fault->dar) != USER_REGION_ID)
			access |= _PAGE_PRIVILEGED;
		local_irq_save(flags);
		hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300,
			inv_flags);
		local_irq_restore(flags);
	}
	r = RESTART;
ack:
	mmdrop(fault->pe_data.mm);
	ack_irq(spa, r);
}
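
/*
 * Top half of the translation fault handler. Reads the fault state,
 * looks up the pe_data for the faulting PE handle in the radix tree
 * (under RCU), takes a reference on the mm and defers the actual fault
 * resolution to the workqueue (xsl_fault_handler_bh). Faults for
 * unknown contexts are acked with an address error.
 */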
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
	struct link *link = (struct link *) data;
	struct spa *spa = link->spa;
	u64 dsisr, dar, pe_handle;
	struct pe_data *pe_data;
	struct ocxl_process_element *pe;
	int lpid, pid, tid;

	read_irq(spa, &dsisr, &dar, &pe_handle);
	trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);

	WARN_ON(pe_handle > SPA_PE_MASK);
	pe = spa->spa_mem + pe_handle;
	lpid = be32_to_cpu(pe->lpid);
	pid = be32_to_cpu(pe->pid);
	tid = be32_to_cpu(pe->tid);
	/* We could be reading all null values here if the PE is being
	 * removed while an interrupt kicks in. It's not supposed to
	 * happen if the driver notified the AFU to terminate the
	 * PASID, and the AFU waited for pending operations before
	 * acknowledging. But even if it happens, we won't find a
	 * memory context below and fail silently, so it should be ok.
	 */
	if (!(dsisr & SPA_XSL_TF)) {
		WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr);
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}

	rcu_read_lock();
	pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		/*
		 * Could only happen if the driver didn't notify the
		 * AFU about PASID termination before removing the PE,
		 * or the AFU didn't wait for all memory access to
		 * have completed.
		 *
		 * Either way, we fail early, but we shouldn't log an
		 * error message, as it is a valid (if unexpected)
		 * scenario
		 */
		rcu_read_unlock();
		pr_debug("Unknown mm context for xsl interrupt\n");
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}
	WARN_ON(pe_data->mm->context.id != pid);

	spa->xsl_fault.pe = pe_handle;
	spa->xsl_fault.dar = dar;
	spa->xsl_fault.dsisr = dsisr;
	spa->xsl_fault.pe_data = *pe_data;
	mmgrab(pe_data->mm); /* mm count is released by bottom half */

	rcu_read_unlock();
	schedule_work(&spa->xsl_fault.fault_work);
	return IRQ_HANDLED;
}

static void unmap_irq_registers(struct spa *spa)
{
	pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
			spa->reg_pe_handle);
}

static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
{
	return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
			&spa->reg_tfc, &spa->reg_pe_handle);
}
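
/*
 * Set up the translation fault interrupt for the link: map the XSL
 * registers, map the hardware IRQ reported by the platform and
 * register xsl_fault_handler() on it.
 */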
static int setup_xsl_irq(struct pci_dev *dev, struct link *link)
{
	struct spa *spa = link->spa;
	int rc;
	int hwirq;

	rc = pnv_ocxl_get_xsl_irq(dev, &hwirq);
	if (rc)
		return rc;

	rc = map_irq_registers(dev, spa);
	if (rc)
		return rc;

	spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
				link->domain, link->bus, link->dev);
	if (!spa->irq_name) {
		unmap_irq_registers(spa);
		dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
		return -ENOMEM;
	}
	/*
	 * At some point, we'll need to look into allowing a higher
	 * number of interrupts. Could we have an IRQ domain per link?
	 */
	spa->virq = irq_create_mapping(NULL, hwirq);
	if (!spa->virq) {
		kfree(spa->irq_name);
		unmap_irq_registers(spa);
		dev_err(&dev->dev,
			"irq_create_mapping failed for translation interrupt\n");
		return -EINVAL;
	}

	dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);

	rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
			link);
	if (rc) {
		irq_dispose_mapping(spa->virq);
		kfree(spa->irq_name);
		unmap_irq_registers(spa);
		dev_err(&dev->dev,
			"request_irq failed for translation interrupt: %d\n",
			rc);
		return -EINVAL;
	}
	return 0;
}

static void release_xsl_irq(struct link *link)
{
	struct spa *spa = link->spa;

	if (spa->virq) {
		free_irq(spa->virq, link);
		irq_dispose_mapping(spa->virq);
	}
	kfree(spa->irq_name);
	unmap_irq_registers(spa);
}
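
/*
 * Allocate the Shared Process Area for the link: a 4MB block of
 * process elements, indexed by PE handle and shared with the XSL.
 */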
static int alloc_spa(struct pci_dev *dev, struct link *link)
{
	struct spa *spa;

	spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
	if (!spa)
		return -ENOMEM;

	mutex_init(&spa->spa_lock);
	INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
	INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);

	spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
	spa->spa_mem = (struct ocxl_process_element *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
	if (!spa->spa_mem) {
		dev_err(&dev->dev, "Can't allocate Shared Process Area\n");
		kfree(spa);
		return -ENOMEM;
	}
	pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus,
		link->dev, spa->spa_mem);

	link->spa = spa;
	return 0;
}

static void free_spa(struct link *link)
{
	struct spa *spa = link->spa;

	pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus,
		link->dev);

	if (spa && spa->spa_mem) {
		free_pages((unsigned long) spa->spa_mem, spa->spa_order);
		kfree(spa);
		link->spa = NULL;
	}
}
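
/*
 * Allocate and initialize a link for a device slot: SPA, translation
 * fault interrupt and platform-specific setup. On success, the new
 * link is returned through out_link with one reference held.
 */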
static int alloc_link(struct pci_dev *dev, int PE_mask, struct link **out_link)
{
	struct link *link;
	int rc;

	link = kzalloc(sizeof(struct link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	kref_init(&link->ref);
	link->domain = pci_domain_nr(dev->bus);
	link->bus = dev->bus->number;
	link->dev = PCI_SLOT(dev->devfn);
	atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);

	rc = alloc_spa(dev, link);
	if (rc)
		goto err_free;

	rc = setup_xsl_irq(dev, link);
	if (rc)
		goto err_spa;

	/* platform specific hook */
	rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
			&link->platform_data);
	if (rc)
		goto err_xsl_irq;

	*out_link = link;
	return 0;

err_xsl_irq:
	release_xsl_irq(link);
err_spa:
	free_spa(link);
err_free:
	kfree(link);
	return rc;
}

static void free_link(struct link *link)
{
	release_xsl_irq(link);
	free_spa(link);
	kfree(link);
}
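
/*
 * Get the link for a device slot, creating it on first use. The
 * functions of an opencapi device all share the same link; the handle
 * returned through link_handle is reference-counted and must be
 * released with ocxl_link_release() when the caller is done with it.
 */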
int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
{
	int rc = 0;
	struct link *link;

	mutex_lock(&links_list_lock);
	list_for_each_entry(link, &links_list, list) {
		/* The functions of a device all share the same link */
		if (link->domain == pci_domain_nr(dev->bus) &&
			link->bus == dev->bus->number &&
			link->dev == PCI_SLOT(dev->devfn)) {
			kref_get(&link->ref);
			*link_handle = link;
			goto unlock;
		}
	}
	rc = alloc_link(dev, PE_mask, &link);
	if (rc)
		goto unlock;

	list_add(&link->list, &links_list);
	*link_handle = link;
unlock:
	mutex_unlock(&links_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_setup);
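
/*
 * kref release callback, called with links_list_lock held once the
 * last reference on the link has been dropped.
 */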
static void release_xsl(struct kref *ref)
{
	struct link *link = container_of(ref, struct link, ref);

	list_del(&link->list);
	/* call platform code before releasing data */
	pnv_ocxl_spa_release(link->platform_data);
	free_link(link);
}
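
/*
 * Drop a reference on a link obtained with ocxl_link_setup(). The link
 * and its SPA are torn down when the last reference goes away.
 */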
void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
	struct link *link = (struct link *) link_handle;

	mutex_lock(&links_list_lock);
	kref_put(&link->ref, release_xsl);
	mutex_unlock(&links_list_lock);
}
EXPORT_SYMBOL_GPL(ocxl_link_release);
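
/*
 * Build the config_state word of a process element from the current
 * CPU state: translation mode (hash vs radix), 64-bit mode, and the
 * problem-state bit for user contexts (kernel contexts use the MSR).
 */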
static u64 calculate_cfg_state(bool kernel)
{
	u64 state;

	state = SPA_CFG_DR;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		state |= SPA_CFG_TC;
	if (radix_enabled())
		state |= SPA_CFG_XLAT_ror;
	else
		state |= SPA_CFG_XLAT_hpt;
	state |= SPA_CFG_HV;
	if (kernel) {
		if (mfmsr() & MSR_SF)
			state |= SPA_CFG_SF;
	} else {
		state |= SPA_CFG_PR;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			state |= SPA_CFG_SF;
	}
	return state;
}
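
/*
 * Add a process element to the SPA for the given PASID, so that the
 * AFU can issue translated memory accesses on behalf of that context.
 * The mm is pinned (mm_count) until the PE is removed, and xsl_err_cb
 * is invoked if a translation fault cannot be resolved.
 */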
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
		u64 amr, struct mm_struct *mm,
		void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
		void *xsl_err_data)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc = 0;
	struct pe_data *pe_data;

	BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128);
	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	mutex_lock(&spa->spa_lock);
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	if (pe->software_state) {
		rc = -EBUSY;
		goto unlock;
	}

	pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL);
	if (!pe_data) {
		rc = -ENOMEM;
		goto unlock;
	}

	pe_data->mm = mm;
	pe_data->xsl_err_cb = xsl_err_cb;
	pe_data->xsl_err_data = xsl_err_data;

	memset(pe, 0, sizeof(struct ocxl_process_element));
	pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
	pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	pe->pid = cpu_to_be32(pidr);
	pe->tid = cpu_to_be32(tidr);
	pe->amr = cpu_to_be64(amr);
	pe->software_state = cpu_to_be32(SPA_PE_VALID);

	mm_context_add_copro(mm);
	/*
	 * Barrier is to make sure PE is visible in the SPA before it
	 * is used by the device. It also helps with the global TLBI
	 * invalidation
	 */
	mb();
	radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);

	/*
	 * The mm must stay valid for as long as the device uses it. We
	 * lower the count when the context is removed from the SPA.
	 *
	 * We grab mm_count (and not mm_users), as we don't want to
	 * end up in a circular dependency if a process mmaps its
	 * mmio, therefore incrementing the file ref count when
	 * calling mmap(), and forgets to unmap before exiting. In
	 * that scenario, when the kernel handles the death of the
	 * process, the file is not cleaned because unmap was not
	 * called, and the mm wouldn't be freed because we would still
	 * have a reference on mm_users. Incrementing mm_count solves
	 * the problem.
	 */
	mmgrab(mm);
	trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_add_pe);
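
/*
 * Update the thread id of an existing process element, then flush the
 * stale entry from the NPU context cache so the new value is used.
 */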
int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	pe->tid = cpu_to_be32(tid);

	/*
	 * The barrier makes sure the PE is updated
	 * before we clear the NPU context cache below, so that the
	 * old PE cannot be reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
	WARN_ON(rc);

	mutex_unlock(&spa->spa_lock);
	return rc;
}
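
/*
 * Remove the process element for a PASID: clear it in the SPA, flush
 * the NPU context cache, drop our reference on the mm and free the
 * pe_data (RCU-delayed, as the fault handler may still look it up).
 */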
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	struct pe_data *pe_data;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	/*
	 * About synchronization with our memory fault handler:
	 *
	 * Before removing the PE, the driver is supposed to have
	 * notified the AFU, which should have cleaned up and made
	 * sure the PASID is no longer in use, including pending
	 * interrupts. However, there's no way to be sure...
	 *
	 * We clear the PE and remove the context from our radix
	 * tree. From that point on, any new interrupt for that
	 * context will fail silently, which is ok. As mentioned
	 * above, that's not expected, but it could happen if the
	 * driver or AFU didn't do the right thing.
	 *
	 * There could still be a bottom half running, but we don't
	 * need to wait/flush, as it is managing a reference count on
	 * the mm it reads from the radix tree.
	 */
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) {
		rc = -EINVAL;
		goto unlock;
	}

	trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid,
				be32_to_cpu(pe->pid), be32_to_cpu(pe->tid));

	memset(pe, 0, sizeof(struct ocxl_process_element));
	/*
	 * The barrier makes sure the PE is removed from the SPA
	 * before we clear the NPU context cache below, so that the
	 * old PE cannot be reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
	WARN_ON(rc);

	pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		WARN(1, "Couldn't find pe data when removing PE\n");
	} else {
		mm_context_remove_copro(pe_data->mm);
		mmdrop(pe_data->mm);
		kfree_rcu(pe_data, rcu);
	}
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);
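
/*
 * Allocate an AFU interrupt on the link, within the per-link budget
 * (MAX_IRQ_PER_LINK). Returns the hardware IRQ number and the trigger
 * address to be passed to the AFU so it can raise the interrupt.
 */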
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq, u64 *trigger_addr)
{
	struct link *link = (struct link *) link_handle;
	int rc, irq;
	u64 addr;

	if (atomic_dec_if_positive(&link->irq_available) < 0)
		return -ENOSPC;

	rc = pnv_ocxl_alloc_xive_irq(&irq, &addr);
	if (rc) {
		atomic_inc(&link->irq_available);
		return rc;
	}

	*hw_irq = irq;
	*trigger_addr = addr;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);
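
/*
 * Free an AFU interrupt allocated with ocxl_link_irq_alloc() and
 * return it to the link's budget.
 */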
void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
	struct link *link = (struct link *) link_handle;

	pnv_ocxl_free_xive_irq(hw_irq);
	atomic_inc(&link->irq_available);
}
EXPORT_SYMBOL_GPL(ocxl_link_free_irq);