/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl.h>

#include "cxl.h"

/* XXX: This is implementation specific */
static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2);
	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);

	dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
	cxl_stop_trace(ctx->afu->adapter);

	return cxl_ack_irq(ctx, 0, errstat);
}
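
/*
 * Slice error interrupt: dump the per-slice error state registers,
 * then write the SERR value back to the register.
 */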
irqreturn_t cxl_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 fir_slice, errstat, serr, afu_debug;

	WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);

	dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat);
	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);

	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return IRQ_HANDLED;
}
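
/*
 * Adapter-wide PSL error interrupt: log the error IVTE and FIR
 * registers and stop the CXL trace.
 */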
static irqreturn_t cxl_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 fir1, fir2, err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte);

	dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
	cxl_stop_trace(adapter);

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

	dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2);

	return IRQ_HANDLED;
}
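
/*
 * Record the fault state in the context and defer the actual handling
 * to the fault_work workqueue: taking a reference on the task's mm
 * cannot be done safely from interrupt context (see the comments in
 * cxl_irq() below).
 */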
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
	ctx->dsisr = dsisr;
	ctx->dar = dar;
	schedule_work(&ctx->fault_work);
	return IRQ_HANDLED;
}
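
/*
 * Main PSL interrupt handler for a context: fetch the interrupt
 * information, decode DSISR bit by bit, and either schedule deferred
 * fault handling or report the error condition.
 */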
static irqreturn_t cxl_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;
	struct cxl_irq_info irq_info;
	u64 dsisr, dar;
	int result;

	if ((result = cxl_get_irq(ctx, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", result);
		return IRQ_HANDLED;
	}

	dsisr = irq_info.dsisr;
	dar = irq_info.dar;

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL_DSISR_An_DS) {
		/*
		 * We don't inherently need to sleep to handle this, but we do
		 * need to get a ref to the task's mm, which we can't do from
		 * irq context without the potential for a deadlock since it
		 * takes the task_lock. An alternate option would be to keep a
		 * reference to the task's mm the entire time it has cxl open,
		 * but to do that we need to solve the issue where we hold a
		 * ref to the mm, but the mm can hold a ref to the fd after an
		 * mmap preventing anything from being cleaned up.
		 */
		pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	if (dsisr & CXL_PSL_DSISR_An_M)
		pr_devel("CXL interrupt: PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_P)
		pr_devel("CXL interrupt: Storage protection violation\n");
	if (dsisr & CXL_PSL_DSISR_An_A)
		pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
	if (dsisr & CXL_PSL_DSISR_An_S)
		pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
	if (dsisr & CXL_PSL_DSISR_An_K)
		pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

	if (dsisr & CXL_PSL_DSISR_An_DM) {
		/*
		 * In some cases we might be able to handle the fault
		 * immediately if hash_page would succeed, but we still need
		 * the task's mm, which as above we can't get without a lock
		 */
		pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}
	if (dsisr & CXL_PSL_DSISR_An_ST)
		WARN(1, "CXL interrupt: Segment Table PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_UR)
		pr_devel("CXL interrupt: AURP PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_PE)
		return handle_psl_slice_error(ctx, dsisr, irq_info.errstat);
	if (dsisr & CXL_PSL_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error %llx\n", irq_info.afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors.
			 * OTOH if we DO ever see a storm of these come in it's
			 * probably best that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: %llx\n",
					    ctx->pe, irq_info.afu_err);
		} else {
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info.afu_err;
			ctx->pending_afu_err = 1;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		/* The error has been acked - don't fall through to the
		 * "Unhandled" warning below */
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}
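
/*
 * The PSL interrupt is multiplexed across all contexts on the AFU:
 * look up the context from the PE handle in CXL_PSL_PEHandle_An and
 * forward the interrupt to cxl_irq(). rcu_read_lock() keeps the
 * context valid for the duration of the lookup and dispatch.
 */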
static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
	irqreturn_t ret;

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		ret = cxl_irq(irq, ctx);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ\n");
	return IRQ_HANDLED;
}
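
/*
 * AFU interrupt: translate the hardware IRQ back to the AFU's IRQ
 * number by walking the allocated ranges, set the corresponding bit in
 * the context's IRQ bitmap and wake up any waiters on the context.
 */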
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
	struct cxl_context *ctx = data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
	int irq_off, afu_irq = 1;
	__u16 range;
	int r;

	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		irq_off = hwirq - ctx->irqs.offset[r];
		range = ctx->irqs.range[r];
		if (irq_off >= 0 && irq_off < range) {
			afu_irq += irq_off;
			break;
		}
		afu_irq += range;
	}
	if (unlikely(r >= CXL_IRQ_RANGES)) {
		WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
		     ctx->pe, irq, hwirq);
		return IRQ_HANDLED;
	}

	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
		 afu_irq, ctx->pe, irq, hwirq);

	if (unlikely(!ctx->irq_bitmap)) {
		WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
		return IRQ_HANDLED;
	}
	spin_lock(&ctx->lock);
	set_bit(afu_irq - 1, ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->lock);

	wake_up_all(&ctx->wq);

	return IRQ_HANDLED;
}
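
/*
 * Map a hardware IRQ to a Linux virq and install the given handler.
 * Returns the virq on success, or 0 on failure.
 */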
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
			 irq_handler_t handler, void *cookie)
{
	unsigned int virq;
	int result;

	/* IRQ Domain? */
	virq = irq_create_mapping(NULL, hwirq);
	if (!virq) {
		dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
		return 0;
	}

	cxl_setup_irq(adapter, hwirq, virq);

	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

	result = request_irq(virq, handler, 0, "cxl", cookie);
	if (result) {
		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
		/* Don't leak the mapping if the handler couldn't be installed */
		irq_dispose_mapping(virq);
		return 0;
	}

	return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
	free_irq(virq, cookie);
	irq_dispose_mapping(virq);
}
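
/*
 * Allocate a single hardware IRQ from the adapter, map it and install
 * the handler, returning both the hwirq and the virq to the caller.
 */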
static int cxl_register_one_irq(struct cxl *adapter,
				irq_handler_t handler,
				void *cookie,
				irq_hw_number_t *dest_hwirq,
				unsigned int *dest_virq)
{
	int hwirq, virq;

	if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
		return hwirq;

	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie)))
		goto err;

	*dest_hwirq = hwirq;
	*dest_virq = virq;

	return 0;

err:
	cxl_release_one_irq(adapter, hwirq);
	return -ENOMEM;
}
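
/*
 * The adapter-level error interrupt is delivered via the PSL error
 * IVTE: register a handler and point CXL_PSL_ErrIVTE at its hwirq.
 */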
int cxl_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
				       &adapter->err_hwirq,
				       &adapter->err_virq)))
		return rc;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);

	return 0;
}

void cxl_release_psl_err_irq(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->err_virq, adapter);
	cxl_release_one_irq(adapter, adapter->err_hwirq);
}

int cxl_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq)))
		return rc;

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}

void cxl_release_serr_irq(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
}

int cxl_register_psl_irq(struct cxl_afu *afu)
{
	return cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
				    &afu->psl_hwirq, &afu->psl_virq);
}

void cxl_release_psl_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->psl_virq, afu);
	cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
}
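
/*
 * Allocate and map a context's IRQ ranges: range 0 is reserved for the
 * multiplexed PSL interrupt, the remaining ranges are wired up to
 * cxl_irq_afu().
 */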
int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
	irq_hw_number_t hwirq;
	int rc, r, i;

	if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
		return rc;

	/* Multiplexed PSL Interrupt */
	ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
	ctx->irqs.range[0] = 1;

	ctx->irq_count = count;
	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
				  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
	if (!ctx->irq_bitmap) {
		/* Don't leak the ranges allocated above */
		cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
		return -ENOMEM;
	}

	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			cxl_map_irq(ctx->afu->adapter, hwirq,
				    cxl_irq_afu, ctx);
		}
	}

	return 0;
}
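
/*
 * Undo afu_register_irqs(): unmap every AFU IRQ in ranges 1+ and
 * release the IRQ ranges back to the adapter.
 */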
void afu_release_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, ctx);
		}
	}

	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}