nitrox_isr.c 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/pci.h>
  3. #include <linux/printk.h>
  4. #include <linux/slab.h>
  5. #include "nitrox_dev.h"
  6. #include "nitrox_csr.h"
  7. #include "nitrox_common.h"
  8. #include "nitrox_hal.h"
  9. /**
  10. * One vector for each type of ring
  11. * - NPS packet ring, AQMQ ring and ZQMQ ring
  12. */
  13. #define NR_RING_VECTORS 3
  14. /* base entry for packet ring/port */
  15. #define PKT_RING_MSIX_BASE 0
  16. #define NON_RING_MSIX_BASE 192
  17. /**
  18. * nps_pkt_slc_isr - IRQ handler for NPS solicit port
  19. * @irq: irq number
  20. * @data: argument
  21. */
  22. static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
  23. {
  24. struct nitrox_q_vector *qvec = data;
  25. union nps_pkt_slc_cnts slc_cnts;
  26. struct nitrox_cmdq *cmdq = qvec->cmdq;
  27. slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
  28. /* New packet on SLC output port */
  29. if (slc_cnts.s.slc_int)
  30. tasklet_hi_schedule(&qvec->resp_tasklet);
  31. return IRQ_HANDLED;
  32. }
  33. static void clear_nps_core_err_intr(struct nitrox_device *ndev)
  34. {
  35. u64 value;
  36. /* Write 1 to clear */
  37. value = nitrox_read_csr(ndev, NPS_CORE_INT);
  38. nitrox_write_csr(ndev, NPS_CORE_INT, value);
  39. dev_err_ratelimited(DEV(ndev), "NSP_CORE_INT 0x%016llx\n", value);
  40. }
/**
 * clear_nps_pkt_err_intr - acknowledge NPS packet engine error interrupts
 * @ndev: NITROX device
 *
 * Reads NPS_PKT_INT and, for each asserted error class, acknowledges the
 * detailed error CSRs by writing the read value back (presumably
 * write-1-to-clear, matching the "Write 1 to clear" convention used for
 * the other error registers in this file — TODO confirm against the HRM).
 * Rings/ports named in the RERR_LO registers are re-enabled after the
 * error is cleared.
 */
static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
	union nps_pkt_int pkt_int;
	unsigned long value, offset;
	int i;

	pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
	dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT 0x%016llx\n",
			    pkt_int.value);

	if (pkt_int.s.slc_err) {
		/* solicit (output) port errors */
		offset = NPS_PKT_SLC_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_ERR_TYPE 0x%016lx\n", value);

		/* each set bit identifies a faulted solicit port */
		offset = NPS_PKT_SLC_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the solicit ports */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_solicit_port(ndev, i);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_LO 0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_HI 0x%016lx\n", value);
	}

	if (pkt_int.s.in_err) {
		/* input (request) ring errors */
		offset = NPS_PKT_IN_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_ERR_TYPE 0x%016lx\n", value);

		/* each set bit identifies a faulted input ring */
		offset = NPS_PKT_IN_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the input ring */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_input_ring(ndev, i);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_LO 0x%016lx\n", value);

		offset = NPS_PKT_IN_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_HI 0x%016lx\n", value);
	}
}
  90. static void clear_pom_err_intr(struct nitrox_device *ndev)
  91. {
  92. u64 value;
  93. value = nitrox_read_csr(ndev, POM_INT);
  94. nitrox_write_csr(ndev, POM_INT, value);
  95. dev_err_ratelimited(DEV(ndev), "POM_INT 0x%016llx\n", value);
  96. }
  97. static void clear_pem_err_intr(struct nitrox_device *ndev)
  98. {
  99. u64 value;
  100. value = nitrox_read_csr(ndev, PEM0_INT);
  101. nitrox_write_csr(ndev, PEM0_INT, value);
  102. dev_err_ratelimited(DEV(ndev), "PEM(0)_INT 0x%016llx\n", value);
  103. }
/**
 * clear_lbc_err_intr - acknowledge LBC (load balance/cache) error interrupts
 * @ndev: NITROX device
 *
 * Each asserted error class in LBC_INT is handled by reading its
 * detailed status CSR(s) and writing the value back to acknowledge
 * (same read/write-back pattern as the other clear_* helpers here).
 * LBC_INT itself is acknowledged last, after all per-class status
 * registers have been cleared.
 */
static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
	union lbc_int lbc_int;
	u64 value, offset;
	int i;

	lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
	dev_err_ratelimited(DEV(ndev), "LBC_INT 0x%016llx\n", lbc_int.value);

	if (lbc_int.s.dma_rd_err) {
		/* clear per-cluster VF error status */
		for (i = 0; i < NR_CLUSTERS; i++) {
			offset = EFL_CORE_VF_ERR_INT0X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
			offset = EFL_CORE_VF_ERR_INT1X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}

	if (lbc_int.s.cam_soft_err) {
		/* CAM state is suspect; drop all cached context */
		dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
		invalidate_lbc(ndev);
	}

	if (lbc_int.s.pref_dat_len_mismatch_err) {
		/* prefetch length mismatch: clear both VF range registers */
		offset = LBC_PLM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_PLM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}

	if (lbc_int.s.rd_dat_len_mismatch_err) {
		/* read length mismatch: clear both VF range registers */
		offset = LBC_ELM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_ELM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}

	/* finally acknowledge the summary register itself */
	nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}
  143. static void clear_efl_err_intr(struct nitrox_device *ndev)
  144. {
  145. int i;
  146. for (i = 0; i < NR_CLUSTERS; i++) {
  147. union efl_core_int core_int;
  148. u64 value, offset;
  149. offset = EFL_CORE_INTX(i);
  150. core_int.value = nitrox_read_csr(ndev, offset);
  151. nitrox_write_csr(ndev, offset, core_int.value);
  152. dev_err_ratelimited(DEV(ndev), "ELF_CORE(%d)_INT 0x%016llx\n",
  153. i, core_int.value);
  154. if (core_int.s.se_err) {
  155. offset = EFL_CORE_SE_ERR_INTX(i);
  156. value = nitrox_read_csr(ndev, offset);
  157. nitrox_write_csr(ndev, offset, value);
  158. }
  159. }
  160. }
  161. static void clear_bmi_err_intr(struct nitrox_device *ndev)
  162. {
  163. u64 value;
  164. value = nitrox_read_csr(ndev, BMI_INT);
  165. nitrox_write_csr(ndev, BMI_INT, value);
  166. dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
  167. }
/**
 * nps_core_int_tasklet - deferred work for the NPS core interrupt
 * @data: the &struct nitrox_q_vector, smuggled through the tasklet's
 *        unsigned long argument
 *
 * Currently a stub: both branches are intentionally empty placeholders
 * for PF queue recovery and VF error notification respectively.
 */
static void nps_core_int_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_device *ndev = qvec->ndev;

	/* if pf mode do queue recovery */
	if (ndev->mode == __NDEV_MODE_PF) {
		/* TODO: PF queue recovery not implemented yet */
	} else {
		/*
		 * if VF(s) enabled communicate the error information
		 * to VF(s)
		 */
	}
}
  181. /**
  182. * nps_core_int_isr - interrupt handler for NITROX errors and
  183. * mailbox communication
  184. */
  185. static irqreturn_t nps_core_int_isr(int irq, void *data)
  186. {
  187. struct nitrox_device *ndev = data;
  188. union nps_core_int_active core_int;
  189. core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
  190. if (core_int.s.nps_core)
  191. clear_nps_core_err_intr(ndev);
  192. if (core_int.s.nps_pkt)
  193. clear_nps_pkt_err_intr(ndev);
  194. if (core_int.s.pom)
  195. clear_pom_err_intr(ndev);
  196. if (core_int.s.pem)
  197. clear_pem_err_intr(ndev);
  198. if (core_int.s.lbc)
  199. clear_lbc_err_intr(ndev);
  200. if (core_int.s.efl)
  201. clear_efl_err_intr(ndev);
  202. if (core_int.s.bmi)
  203. clear_bmi_err_intr(ndev);
  204. /* If more work callback the ISR, set resend */
  205. core_int.s.resend = 1;
  206. nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
  207. return IRQ_HANDLED;
  208. }
/**
 * nitrox_unregister_interrupts - release all MSI-X vectors and q-vectors
 * @ndev: NITROX device
 *
 * For every valid q-vector: drop the affinity hint, free the irq (so no
 * new tasklet schedules can arrive), then disable and kill the response
 * tasklet.  Finally frees the q-vector array and the MSI-X vectors.
 * Safe to call on a partially initialized array: invalid entries are
 * skipped, which is what the irq_fail path in
 * nitrox_register_interrupts() relies on.
 */
void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int i;

	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	pci_free_irq_vectors(pdev);
}
  230. int nitrox_register_interrupts(struct nitrox_device *ndev)
  231. {
  232. struct pci_dev *pdev = ndev->pdev;
  233. struct nitrox_q_vector *qvec;
  234. int nr_vecs, vec, cpu;
  235. int ret, i;
  236. /*
  237. * PF MSI-X vectors
  238. *
  239. * Entry 0: NPS PKT ring 0
  240. * Entry 1: AQMQ ring 0
  241. * Entry 2: ZQM ring 0
  242. * Entry 3: NPS PKT ring 1
  243. * Entry 4: AQMQ ring 1
  244. * Entry 5: ZQM ring 1
  245. * ....
  246. * Entry 192: NPS_CORE_INT_ACTIVE
  247. */
  248. nr_vecs = pci_msix_vec_count(pdev);
  249. /* Enable MSI-X */
  250. ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
  251. if (ret < 0) {
  252. dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
  253. return ret;
  254. }
  255. ndev->num_vecs = nr_vecs;
  256. ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
  257. if (!ndev->qvec) {
  258. pci_free_irq_vectors(pdev);
  259. return -ENOMEM;
  260. }
  261. /* request irqs for packet rings/ports */
  262. for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
  263. qvec = &ndev->qvec[i];
  264. qvec->ring = i / NR_RING_VECTORS;
  265. if (qvec->ring >= ndev->nr_queues)
  266. break;
  267. snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
  268. /* get the vector number */
  269. vec = pci_irq_vector(pdev, i);
  270. ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
  271. if (ret) {
  272. dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
  273. qvec->ring);
  274. goto irq_fail;
  275. }
  276. cpu = qvec->ring % num_online_cpus();
  277. irq_set_affinity_hint(vec, get_cpu_mask(cpu));
  278. tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
  279. (unsigned long)qvec);
  280. qvec->cmdq = &ndev->pkt_inq[qvec->ring];
  281. qvec->valid = true;
  282. }
  283. /* request irqs for non ring vectors */
  284. i = NON_RING_MSIX_BASE;
  285. qvec = &ndev->qvec[i];
  286. snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
  287. /* get the vector number */
  288. vec = pci_irq_vector(pdev, i);
  289. ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
  290. if (ret) {
  291. dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
  292. goto irq_fail;
  293. }
  294. cpu = num_online_cpus();
  295. irq_set_affinity_hint(vec, get_cpu_mask(cpu));
  296. tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
  297. (unsigned long)qvec);
  298. qvec->ndev = ndev;
  299. qvec->valid = true;
  300. return 0;
  301. irq_fail:
  302. nitrox_unregister_interrupts(ndev);
  303. return ret;
  304. }