aer.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Implement the AER root port service driver. The driver registers an IRQ
  4. * handler. When a root port triggers an AER interrupt, the IRQ handler
  5. * collects root port status and schedules work.
  6. *
  7. * Copyright (C) 2006 Intel Corp.
  8. * Tom Long Nguyen (tom.l.nguyen@intel.com)
  9. * Zhang Yanmin (yanmin.zhang@intel.com)
  10. *
  11. * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
  12. * Andrew Patterson <andrew.patterson@hp.com>
  13. */
  14. #include <linux/cper.h>
  15. #include <linux/pci.h>
  16. #include <linux/pci-acpi.h>
  17. #include <linux/sched.h>
  18. #include <linux/kernel.h>
  19. #include <linux/errno.h>
  20. #include <linux/pm.h>
  21. #include <linux/init.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/delay.h>
  24. #include <linux/kfifo.h>
  25. #include <linux/slab.h>
  26. #include <acpi/apei.h>
  27. #include <ras/ras_event.h>
  28. #include "../pci.h"
  29. #include "portdrv.h"
/* Max number of pending error sources the root-port kfifo can hold */
#define AER_ERROR_SOURCES_MAX		128

#define AER_MAX_TYPEOF_COR_ERRS		16	/* as per PCI_ERR_COR_STATUS */
#define AER_MAX_TYPEOF_UNCOR_ERRS	26	/* as per PCI_ERR_UNCOR_STATUS*/

/* One error report captured from the Root Error Status/Source ID registers */
struct aer_err_source {
	unsigned int status;	/* PCI_ERR_ROOT_STATUS value */
	unsigned int id;	/* error source requester ID */
};

struct aer_rpc {
	struct pci_dev *rpd;	/* Root Port device */
	/* Errors queued by the IRQ handler, drained by the service work */
	DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};
/* AER stats for the device */
struct aer_stats {
	/*
	 * Fields for all AER capable devices. They indicate the errors
	 * "as seen by this device". Note that this may mean that if an
	 * end point is causing problems, the AER counters may increment
	 * at its link partner (e.g. root port) because the errors will be
	 * "seen" by the link partner and not the problematic end point
	 * itself (which may report all counters as 0 as it never saw any
	 * problems).
	 */
	/* Counters for different type of correctable errors */
	u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
	/* Counters for different type of fatal uncorrectable errors */
	u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
	/* Counters for different type of nonfatal uncorrectable errors */
	u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
	/* Total number of ERR_COR sent by this device */
	u64 dev_total_cor_errs;
	/* Total number of ERR_FATAL sent by this device */
	u64 dev_total_fatal_errs;
	/* Total number of ERR_NONFATAL sent by this device */
	u64 dev_total_nonfatal_errs;

	/*
	 * Fields for Root ports & root complex event collectors only, these
	 * indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
	 * messages received by the root port / event collector, INCLUDING the
	 * ones that are generated internally (by the rootport itself)
	 */
	u64 rootport_total_cor_errs;
	u64 rootport_total_fatal_errs;
	u64 rootport_total_nonfatal_errs;
};
  74. #define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
  75. PCI_ERR_UNC_ECRC| \
  76. PCI_ERR_UNC_UNSUP| \
  77. PCI_ERR_UNC_COMP_ABORT| \
  78. PCI_ERR_UNC_UNX_COMP| \
  79. PCI_ERR_UNC_MALF_TLP)
  80. #define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
  81. PCI_EXP_RTCTL_SENFEE| \
  82. PCI_EXP_RTCTL_SEFEE)
  83. #define ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN| \
  84. PCI_ERR_ROOT_CMD_NONFATAL_EN| \
  85. PCI_ERR_ROOT_CMD_FATAL_EN)
  86. #define ERR_COR_ID(d) (d & 0xffff)
  87. #define ERR_UNCOR_ID(d) (d >> 16)
/* Non-zero once native AER handling has been disabled for the system */
static int pcie_aer_disable;

/* Disable native AER handling (e.g. for "pci=noaer") */
void pci_no_aer(void)
{
	pcie_aer_disable = 1;
}
  93. bool pci_aer_available(void)
  94. {
  95. return !pcie_aer_disable && pci_msi_enabled();
  96. }
#ifdef CONFIG_PCIE_ECRC
#define ECRC_POLICY_DEFAULT 0		/* ECRC set by BIOS */
#define ECRC_POLICY_OFF     1		/* ECRC off for performance */
#define ECRC_POLICY_ON      2		/* ECRC on for data integrity */

/* Current system-wide ECRC policy, selected via the "ecrc=" option */
static int ecrc_policy = ECRC_POLICY_DEFAULT;

/* Keyword accepted on the command line for each policy value */
static const char *ecrc_policy_str[] = {
	[ECRC_POLICY_DEFAULT] = "bios",
	[ECRC_POLICY_OFF] = "off",
	[ECRC_POLICY_ON] = "on"
};
  107. /**
  108. * enable_ercr_checking - enable PCIe ECRC checking for a device
  109. * @dev: the PCI device
  110. *
  111. * Returns 0 on success, or negative on failure.
  112. */
  113. static int enable_ecrc_checking(struct pci_dev *dev)
  114. {
  115. int pos;
  116. u32 reg32;
  117. if (!pci_is_pcie(dev))
  118. return -ENODEV;
  119. pos = dev->aer_cap;
  120. if (!pos)
  121. return -ENODEV;
  122. pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
  123. if (reg32 & PCI_ERR_CAP_ECRC_GENC)
  124. reg32 |= PCI_ERR_CAP_ECRC_GENE;
  125. if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
  126. reg32 |= PCI_ERR_CAP_ECRC_CHKE;
  127. pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
  128. return 0;
  129. }
  130. /**
  131. * disable_ercr_checking - disables PCIe ECRC checking for a device
  132. * @dev: the PCI device
  133. *
  134. * Returns 0 on success, or negative on failure.
  135. */
  136. static int disable_ecrc_checking(struct pci_dev *dev)
  137. {
  138. int pos;
  139. u32 reg32;
  140. if (!pci_is_pcie(dev))
  141. return -ENODEV;
  142. pos = dev->aer_cap;
  143. if (!pos)
  144. return -ENODEV;
  145. pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
  146. reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
  147. pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
  148. return 0;
  149. }
  150. /**
  151. * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
  152. * @dev: the PCI device
  153. */
  154. void pcie_set_ecrc_checking(struct pci_dev *dev)
  155. {
  156. switch (ecrc_policy) {
  157. case ECRC_POLICY_DEFAULT:
  158. return;
  159. case ECRC_POLICY_OFF:
  160. disable_ecrc_checking(dev);
  161. break;
  162. case ECRC_POLICY_ON:
  163. enable_ecrc_checking(dev);
  164. break;
  165. default:
  166. return;
  167. }
  168. }
  169. /**
  170. * pcie_ecrc_get_policy - parse kernel command-line ecrc option
  171. */
  172. void pcie_ecrc_get_policy(char *str)
  173. {
  174. int i;
  175. for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
  176. if (!strncmp(str, ecrc_policy_str[i],
  177. strlen(ecrc_policy_str[i])))
  178. break;
  179. if (i >= ARRAY_SIZE(ecrc_policy_str))
  180. return;
  181. ecrc_policy = i;
  182. }
  183. #endif /* CONFIG_PCIE_ECRC */
  184. #ifdef CONFIG_ACPI_APEI
  185. static inline int hest_match_pci(struct acpi_hest_aer_common *p,
  186. struct pci_dev *pci)
  187. {
  188. return ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
  189. ACPI_HEST_BUS(p->bus) == pci->bus->number &&
  190. p->device == PCI_SLOT(pci->devfn) &&
  191. p->function == PCI_FUNC(pci->devfn);
  192. }
  193. static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
  194. struct pci_dev *dev)
  195. {
  196. u16 hest_type = hest_hdr->type;
  197. u8 pcie_type = pci_pcie_type(dev);
  198. if ((hest_type == ACPI_HEST_TYPE_AER_ROOT_PORT &&
  199. pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
  200. (hest_type == ACPI_HEST_TYPE_AER_ENDPOINT &&
  201. pcie_type == PCI_EXP_TYPE_ENDPOINT) ||
  202. (hest_type == ACPI_HEST_TYPE_AER_BRIDGE &&
  203. (dev->class >> 16) == PCI_BASE_CLASS_BRIDGE))
  204. return true;
  205. return false;
  206. }
/* Context passed through apei_hest_parse() to aer_hest_parse() */
struct aer_hest_parse_info {
	struct pci_dev *pci_dev;	/* device to match, or NULL for "any" */
	int firmware_first;		/* result: FIRMWARE_FIRST flag found */
};
  211. static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
  212. {
  213. if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
  214. hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
  215. hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
  216. return 1;
  217. return 0;
  218. }
  219. static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
  220. {
  221. struct aer_hest_parse_info *info = data;
  222. struct acpi_hest_aer_common *p;
  223. int ff;
  224. if (!hest_source_is_pcie_aer(hest_hdr))
  225. return 0;
  226. p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
  227. ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
  228. /*
  229. * If no specific device is supplied, determine whether
  230. * FIRMWARE_FIRST is set for *any* PCIe device.
  231. */
  232. if (!info->pci_dev) {
  233. info->firmware_first |= ff;
  234. return 0;
  235. }
  236. /* Otherwise, check the specific device */
  237. if (p->flags & ACPI_HEST_GLOBAL) {
  238. if (hest_match_type(hest_hdr, info->pci_dev))
  239. info->firmware_first = ff;
  240. } else
  241. if (hest_match_pci(p, info->pci_dev))
  242. info->firmware_first = ff;
  243. return 0;
  244. }
  245. static void aer_set_firmware_first(struct pci_dev *pci_dev)
  246. {
  247. int rc;
  248. struct aer_hest_parse_info info = {
  249. .pci_dev = pci_dev,
  250. .firmware_first = 0,
  251. };
  252. rc = apei_hest_parse(aer_hest_parse, &info);
  253. if (rc)
  254. pci_dev->__aer_firmware_first = 0;
  255. else
  256. pci_dev->__aer_firmware_first = info.firmware_first;
  257. pci_dev->__aer_firmware_first_valid = 1;
  258. }
  259. int pcie_aer_get_firmware_first(struct pci_dev *dev)
  260. {
  261. if (!pci_is_pcie(dev))
  262. return 0;
  263. if (pcie_ports_native)
  264. return 0;
  265. if (!dev->__aer_firmware_first_valid)
  266. aer_set_firmware_first(dev);
  267. return dev->__aer_firmware_first;
  268. }
  269. static bool aer_firmware_first;
  270. /**
  271. * aer_acpi_firmware_first - Check if APEI should control AER.
  272. */
  273. bool aer_acpi_firmware_first(void)
  274. {
  275. static bool parsed = false;
  276. struct aer_hest_parse_info info = {
  277. .pci_dev = NULL, /* Check all PCIe devices */
  278. .firmware_first = 0,
  279. };
  280. if (pcie_ports_native)
  281. return false;
  282. if (!parsed) {
  283. apei_hest_parse(aer_hest_parse, &info);
  284. aer_firmware_first = info.firmware_first;
  285. parsed = true;
  286. }
  287. return aer_firmware_first;
  288. }
  289. #endif
/* Device Control bits that enable all four classes of error reporting */
#define PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
  292. int pci_enable_pcie_error_reporting(struct pci_dev *dev)
  293. {
  294. if (pcie_aer_get_firmware_first(dev))
  295. return -EIO;
  296. if (!dev->aer_cap)
  297. return -EIO;
  298. return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
  299. }
  300. EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
  301. int pci_disable_pcie_error_reporting(struct pci_dev *dev)
  302. {
  303. if (pcie_aer_get_firmware_first(dev))
  304. return -EIO;
  305. return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
  306. PCI_EXP_AER_FLAGS);
  307. }
  308. EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
/* Clear the Device Status error bits by writing back the value read
 * (the error bits are write-1-to-clear). */
void pci_aer_clear_device_status(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}
  315. int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
  316. {
  317. int pos;
  318. u32 status, sev;
  319. pos = dev->aer_cap;
  320. if (!pos)
  321. return -EIO;
  322. if (pcie_aer_get_firmware_first(dev))
  323. return -EIO;
  324. /* Clear status bits for ERR_NONFATAL errors only */
  325. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
  326. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
  327. status &= ~sev;
  328. if (status)
  329. pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
  330. return 0;
  331. }
  332. EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
  333. void pci_aer_clear_fatal_status(struct pci_dev *dev)
  334. {
  335. int pos;
  336. u32 status, sev;
  337. pos = dev->aer_cap;
  338. if (!pos)
  339. return;
  340. if (pcie_aer_get_firmware_first(dev))
  341. return;
  342. /* Clear status bits for ERR_FATAL errors only */
  343. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
  344. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
  345. status &= sev;
  346. if (status)
  347. pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
  348. }
  349. int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
  350. {
  351. int pos;
  352. u32 status;
  353. int port_type;
  354. if (!pci_is_pcie(dev))
  355. return -ENODEV;
  356. pos = dev->aer_cap;
  357. if (!pos)
  358. return -EIO;
  359. if (pcie_aer_get_firmware_first(dev))
  360. return -EIO;
  361. port_type = pci_pcie_type(dev);
  362. if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
  363. pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
  364. pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
  365. }
  366. pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
  367. pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
  368. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
  369. pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
  370. return 0;
  371. }
  372. void pci_aer_init(struct pci_dev *dev)
  373. {
  374. dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
  375. if (dev->aer_cap)
  376. dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
  377. pci_cleanup_aer_error_status_regs(dev);
  378. }
/* Release AER state allocated in pci_aer_init() */
void pci_aer_exit(struct pci_dev *dev)
{
	kfree(dev->aer_stats);
	dev->aer_stats = NULL;	/* guard against use after device teardown */
}
  384. #define AER_AGENT_RECEIVER 0
  385. #define AER_AGENT_REQUESTER 1
  386. #define AER_AGENT_COMPLETER 2
  387. #define AER_AGENT_TRANSMITTER 3
  388. #define AER_AGENT_REQUESTER_MASK(t) ((t == AER_CORRECTABLE) ? \
  389. 0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
  390. #define AER_AGENT_COMPLETER_MASK(t) ((t == AER_CORRECTABLE) ? \
  391. 0 : PCI_ERR_UNC_COMP_ABORT)
  392. #define AER_AGENT_TRANSMITTER_MASK(t) ((t == AER_CORRECTABLE) ? \
  393. (PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)
  394. #define AER_GET_AGENT(t, e) \
  395. ((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \
  396. (e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \
  397. (e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \
  398. AER_AGENT_RECEIVER)
  399. #define AER_PHYSICAL_LAYER_ERROR 0
  400. #define AER_DATA_LINK_LAYER_ERROR 1
  401. #define AER_TRANSACTION_LAYER_ERROR 2
  402. #define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
  403. PCI_ERR_COR_RCVR : 0)
  404. #define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
  405. (PCI_ERR_COR_BAD_TLP| \
  406. PCI_ERR_COR_BAD_DLLP| \
  407. PCI_ERR_COR_REP_ROLL| \
  408. PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)
  409. #define AER_GET_LAYER_ERROR(t, e) \
  410. ((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
  411. (e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
  412. AER_TRANSACTION_LAYER_ERROR)
/*
 * AER error strings
 */
static const char *aer_error_severity_string[] = {
	"Uncorrected (Non-Fatal)",
	"Uncorrected (Fatal)",
	"Corrected"
};

/* Indexed by the AER_*_LAYER_ERROR classification */
static const char *aer_error_layer[] = {
	"Physical Layer",
	"Data Link Layer",
	"Transaction Layer"
};

/* Indexed by bit position in PCI_ERR_COR_STATUS; NULL = reserved bit */
static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = {
	"RxErr",		/* Bit Position 0 */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"BadTLP",		/* Bit Position 6 */
	"BadDLLP",		/* Bit Position 7 */
	"Rollover",		/* Bit Position 8 */
	NULL,
	NULL,
	NULL,
	"Timeout",		/* Bit Position 12 */
	"NonFatalErr",		/* Bit Position 13 */
	"CorrIntErr",		/* Bit Position 14 */
	"HeaderOF",		/* Bit Position 15 */
};

/* Indexed by bit position in PCI_ERR_UNCOR_STATUS; NULL = reserved bit */
static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
	"Undefined",		/* Bit Position 0 */
	NULL,
	NULL,
	NULL,
	"DLP",			/* Bit Position 4 */
	"SDES",			/* Bit Position 5 */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"TLP",			/* Bit Position 12 */
	"FCP",			/* Bit Position 13 */
	"CmpltTO",		/* Bit Position 14 */
	"CmpltAbrt",		/* Bit Position 15 */
	"UnxCmplt",		/* Bit Position 16 */
	"RxOF",			/* Bit Position 17 */
	"MalfTLP",		/* Bit Position 18 */
	"ECRC",			/* Bit Position 19 */
	"UnsupReq",		/* Bit Position 20 */
	"ACSViol",		/* Bit Position 21 */
	"UncorrIntErr",		/* Bit Position 22 */
	"BlockedTLP",		/* Bit Position 23 */
	"AtomicOpBlocked",	/* Bit Position 24 */
	"TLPBlockedErr",	/* Bit Position 25 */
};

/* Indexed by the AER_AGENT_* classification */
static const char *aer_agent_string[] = {
	"Receiver ID",
	"Requester ID",
	"Completer ID",
	"Transmitter ID"
};
/*
 * Generate a sysfs show() function named @name_show that dumps one line
 * per known error type ("<name> <count>"), one line per unknown-but-seen
 * bit ("<array>_bit[N] <count>"), and a trailing "TOTAL_<str> <count>".
 */
#define aer_stats_dev_attr(name, stats_array, strings_array,		\
			   total_string, total_field)			\
	static ssize_t							\
	name##_show(struct device *dev, struct device_attribute *attr,	\
		    char *buf)						\
{									\
	unsigned int i;							\
	char *str = buf;						\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	u64 *stats = pdev->aer_stats->stats_array;			\
									\
	for (i = 0; i < ARRAY_SIZE(strings_array); i++) {		\
		if (strings_array[i])					\
			str += sprintf(str, "%s %llu\n",		\
				       strings_array[i], stats[i]);	\
		else if (stats[i])					\
			str += sprintf(str, #stats_array "_bit[%d] %llu\n",\
				       i, stats[i]);			\
	}								\
	str += sprintf(str, "TOTAL_%s %llu\n", total_string,		\
		       pdev->aer_stats->total_field);			\
	return str-buf;							\
}									\
static DEVICE_ATTR_RO(name)

aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
		   aer_correctable_error_string, "ERR_COR",
		   dev_total_cor_errs);
aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
		   aer_uncorrectable_error_string, "ERR_FATAL",
		   dev_total_fatal_errs);
aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
		   aer_uncorrectable_error_string, "ERR_NONFATAL",
		   dev_total_nonfatal_errs);
/* Generate a sysfs show() that prints one rootport total counter */
#define aer_stats_rootport_attr(name, field)				\
	static ssize_t							\
	name##_show(struct device *dev, struct device_attribute *attr,	\
		    char *buf)						\
{									\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	return sprintf(buf, "%llu\n", pdev->aer_stats->field);		\
}									\
static DEVICE_ATTR_RO(name)

aer_stats_rootport_attr(aer_rootport_total_err_cor,
			rootport_total_cor_errs);
aer_stats_rootport_attr(aer_rootport_total_err_fatal,
			rootport_total_fatal_errs);
aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
			rootport_total_nonfatal_errs);
/* All AER statistics attributes; visibility filtered per-device below */
static struct attribute *aer_stats_attrs[] __ro_after_init = {
	&dev_attr_aer_dev_correctable.attr,
	&dev_attr_aer_dev_fatal.attr,
	&dev_attr_aer_dev_nonfatal.attr,
	&dev_attr_aer_rootport_total_err_cor.attr,
	&dev_attr_aer_rootport_total_err_fatal.attr,
	&dev_attr_aer_rootport_total_err_nonfatal.attr,
	NULL
};
  535. static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
  536. struct attribute *a, int n)
  537. {
  538. struct device *dev = kobj_to_dev(kobj);
  539. struct pci_dev *pdev = to_pci_dev(dev);
  540. if (!pdev->aer_stats)
  541. return 0;
  542. if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
  543. a == &dev_attr_aer_rootport_total_err_fatal.attr ||
  544. a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
  545. pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
  546. return 0;
  547. return a->mode;
  548. }
/* Exposed to the PCI sysfs core; attributes gated by the callback above */
const struct attribute_group aer_stats_attr_group = {
	.attrs  = aer_stats_attrs,
	.is_visible = aer_stats_attrs_are_visible,
};
  553. static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
  554. struct aer_err_info *info)
  555. {
  556. int status, i, max = -1;
  557. u64 *counter = NULL;
  558. struct aer_stats *aer_stats = pdev->aer_stats;
  559. if (!aer_stats)
  560. return;
  561. switch (info->severity) {
  562. case AER_CORRECTABLE:
  563. aer_stats->dev_total_cor_errs++;
  564. counter = &aer_stats->dev_cor_errs[0];
  565. max = AER_MAX_TYPEOF_COR_ERRS;
  566. break;
  567. case AER_NONFATAL:
  568. aer_stats->dev_total_nonfatal_errs++;
  569. counter = &aer_stats->dev_nonfatal_errs[0];
  570. max = AER_MAX_TYPEOF_UNCOR_ERRS;
  571. break;
  572. case AER_FATAL:
  573. aer_stats->dev_total_fatal_errs++;
  574. counter = &aer_stats->dev_fatal_errs[0];
  575. max = AER_MAX_TYPEOF_UNCOR_ERRS;
  576. break;
  577. }
  578. status = (info->status & ~info->mask);
  579. for (i = 0; i < max; i++)
  580. if (status & (1 << i))
  581. counter[i]++;
  582. }
  583. static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
  584. struct aer_err_source *e_src)
  585. {
  586. struct aer_stats *aer_stats = pdev->aer_stats;
  587. if (!aer_stats)
  588. return;
  589. if (e_src->status & PCI_ERR_ROOT_COR_RCV)
  590. aer_stats->rootport_total_cor_errs++;
  591. if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
  592. if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
  593. aer_stats->rootport_total_fatal_errs++;
  594. else
  595. aer_stats->rootport_total_nonfatal_errs++;
  596. }
  597. }
/* Dump the four logged TLP header dwords for an error with a valid log */
static void __print_tlp_header(struct pci_dev *dev,
			       struct aer_header_log_regs *t)
{
	pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
		t->dw0, t->dw1, t->dw2, t->dw3);
}
  604. static void __aer_print_error(struct pci_dev *dev,
  605. struct aer_err_info *info)
  606. {
  607. int i, status;
  608. const char *errmsg = NULL;
  609. status = (info->status & ~info->mask);
  610. for (i = 0; i < 32; i++) {
  611. if (!(status & (1 << i)))
  612. continue;
  613. if (info->severity == AER_CORRECTABLE)
  614. errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
  615. aer_correctable_error_string[i] : NULL;
  616. else
  617. errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
  618. aer_uncorrectable_error_string[i] : NULL;
  619. if (errmsg)
  620. pci_err(dev, " [%2d] %-22s%s\n", i, errmsg,
  621. info->first_error == i ? " (First)" : "");
  622. else
  623. pci_err(dev, " [%2d] Unknown Error Bit%s\n",
  624. i, info->first_error == i ? " (First)" : "");
  625. }
  626. pci_dev_aer_stats_incr(dev, info);
  627. }
  628. void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
  629. {
  630. int layer, agent;
  631. int id = ((dev->bus->number << 8) | dev->devfn);
  632. if (!info->status) {
  633. pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
  634. aer_error_severity_string[info->severity]);
  635. goto out;
  636. }
  637. layer = AER_GET_LAYER_ERROR(info->severity, info->status);
  638. agent = AER_GET_AGENT(info->severity, info->status);
  639. pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
  640. aer_error_severity_string[info->severity],
  641. aer_error_layer[layer], aer_agent_string[agent]);
  642. pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
  643. dev->vendor, dev->device,
  644. info->status, info->mask);
  645. __aer_print_error(dev, info);
  646. if (info->tlp_header_valid)
  647. __print_tlp_header(dev, &info->tlp);
  648. out:
  649. if (info->id && info->error_dev_num > 1 && info->id == id)
  650. pci_err(dev, " Error of this Agent is reported first\n");
  651. trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
  652. info->severity, info->tlp_header_valid, &info->tlp);
  653. }
  654. static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
  655. {
  656. u8 bus = info->id >> 8;
  657. u8 devfn = info->id & 0xff;
  658. pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
  659. info->multi_error_valid ? "Multiple " : "",
  660. aer_error_severity_string[info->severity],
  661. pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  662. }
  663. #ifdef CONFIG_ACPI_APEI_PCIEAER
  664. int cper_severity_to_aer(int cper_severity)
  665. {
  666. switch (cper_severity) {
  667. case CPER_SEV_RECOVERABLE:
  668. return AER_NONFATAL;
  669. case CPER_SEV_FATAL:
  670. return AER_FATAL;
  671. default:
  672. return AER_CORRECTABLE;
  673. }
  674. }
  675. EXPORT_SYMBOL_GPL(cper_severity_to_aer);
/*
 * cper_print_aer - log an AER error described by a firmware CPER record
 * @dev: device the record refers to
 * @aer_severity: AER_{CORRECTABLE,NONFATAL,FATAL}
 * @aer: snapshot of the device's AER capability registers from the record
 *
 * Output line order mirrors the native aer_print_error() path so logs
 * look the same regardless of who collected the error.
 */
void cper_print_aer(struct pci_dev *dev, int aer_severity,
		    struct aer_capability_regs *aer)
{
	int layer, agent, tlp_header_valid = 0;
	u32 status, mask;
	struct aer_err_info info;

	/* Pick the register pair matching the severity class */
	if (aer_severity == AER_CORRECTABLE) {
		status = aer->cor_status;
		mask = aer->cor_mask;
	} else {
		status = aer->uncor_status;
		mask = aer->uncor_mask;
		/* Header log is only valid for certain uncorrectable bits */
		tlp_header_valid = status & AER_LOG_TLP_MASKS;
	}

	layer = AER_GET_LAYER_ERROR(aer_severity, status);
	agent = AER_GET_AGENT(aer_severity, status);

	/* Build a minimal aer_err_info so __aer_print_error() can be reused */
	memset(&info, 0, sizeof(info));
	info.severity = aer_severity;
	info.status = status;
	info.mask = mask;
	info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);

	pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
	__aer_print_error(dev, &info);
	pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
		aer_error_layer[layer], aer_agent_string[agent]);

	if (aer_severity != AER_CORRECTABLE)
		pci_err(dev, "aer_uncor_severity: 0x%08x\n",
			aer->uncor_severity);

	if (tlp_header_valid)
		__print_tlp_header(dev, &aer->header_log);

	trace_aer_event(dev_name(&dev->dev), (status & ~mask),
			aer_severity, tlp_header_valid, &aer->header_log);
}
  709. #endif
  710. /**
  711. * add_error_device - list device to be handled
  712. * @e_info: pointer to error info
  713. * @dev: pointer to pci_dev to be added
  714. */
  715. static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
  716. {
  717. if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
  718. e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
  719. e_info->error_dev_num++;
  720. return 0;
  721. }
  722. return -ENOSPC;
  723. }
/**
 * is_error_source - check whether the device is source of reported error
 * @dev: pointer to pci_dev to be checked
 * @e_info: pointer to reported error info
 *
 * First tries a cheap requester-ID match; when that is inconclusive,
 * probes the device's own AER status registers.
 */
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
	int pos;
	u32 status, mask;
	u16 reg16;

	/*
	 * When bus id is equal to 0, it might be a bad id
	 * reported by root port.
	 */
	if ((PCI_BUS_NUM(e_info->id) != 0) &&
	    !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
		/* Device ID match? */
		if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
			return true;

		/* Continue id comparing if there is no multiple error */
		if (!e_info->multi_error_valid)
			return false;
	}

	/*
	 * When either
	 * 1) bus id is equal to 0. Some ports might lose the bus
	 *    id of error source id;
	 * 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set
	 * 3) There are multiple errors and prior ID comparing fails;
	 * We check AER status registers to find possible reporter.
	 */
	if (atomic_read(&dev->enable_cnt) == 0)
		return false;	/* disabled device cannot be the reporter */

	/* Check if AER is enabled */
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
	if (!(reg16 & PCI_EXP_AER_FLAGS))
		return false;

	pos = dev->aer_cap;
	if (!pos)
		return false;

	/* Check if error is recorded */
	if (e_info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
	} else {
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	}
	/* Only an unmasked, recorded error implicates this device */
	if (status & ~mask)
		return true;

	return false;
}
  776. static int find_device_iter(struct pci_dev *dev, void *data)
  777. {
  778. struct aer_err_info *e_info = (struct aer_err_info *)data;
  779. if (is_error_source(dev, e_info)) {
  780. /* List this device */
  781. if (add_error_device(e_info, dev)) {
  782. /* We cannot handle more... Stop iteration */
  783. /* TODO: Should print error message here? */
  784. return 1;
  785. }
  786. /* If there is only a single error, stop iteration */
  787. if (!e_info->multi_error_valid)
  788. return 1;
  789. }
  790. return 0;
  791. }
  792. /**
  793. * find_source_device - search through device hierarchy for source device
  794. * @parent: pointer to Root Port pci_dev data structure
  795. * @e_info: including detailed error information such like id
  796. *
  797. * Return true if found.
  798. *
  799. * Invoked by DPC when error is detected at the Root Port.
  800. * Caller of this function must set id, severity, and multi_error_valid of
  801. * struct aer_err_info pointed by @e_info properly. This function must fill
  802. * e_info->error_dev_num and e_info->dev[], based on the given information.
  803. */
  804. static bool find_source_device(struct pci_dev *parent,
  805. struct aer_err_info *e_info)
  806. {
  807. struct pci_dev *dev = parent;
  808. int result;
  809. /* Must reset in this function */
  810. e_info->error_dev_num = 0;
  811. /* Is Root Port an agent that sends error message? */
  812. result = find_device_iter(dev, e_info);
  813. if (result)
  814. return true;
  815. pci_walk_bus(parent->subordinate, find_device_iter, e_info);
  816. if (!e_info->error_dev_num) {
  817. pci_printk(KERN_DEBUG, parent, "can't find device of ID%04x\n",
  818. e_info->id);
  819. return false;
  820. }
  821. return true;
  822. }
  823. /**
  824. * handle_error_source - handle logging error into an event log
  825. * @dev: pointer to pci_dev data structure of error source device
  826. * @info: comprehensive error information
  827. *
  828. * Invoked when an error being detected by Root Port.
  829. */
  830. static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
  831. {
  832. int pos;
  833. if (info->severity == AER_CORRECTABLE) {
  834. /*
  835. * Correctable error does not need software intervention.
  836. * No need to go through error recovery process.
  837. */
  838. pos = dev->aer_cap;
  839. if (pos)
  840. pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
  841. info->status);
  842. pci_aer_clear_device_status(dev);
  843. } else if (info->severity == AER_NONFATAL)
  844. pcie_do_recovery(dev, pci_channel_io_normal,
  845. PCIE_PORT_SERVICE_AER);
  846. else if (info->severity == AER_FATAL)
  847. pcie_do_recovery(dev, pci_channel_io_frozen,
  848. PCIE_PORT_SERVICE_AER);
  849. pci_dev_put(dev);
  850. }
#ifdef CONFIG_ACPI_APEI_PCIEAER

/* Ring buffer of errors handed to us by firmware-first (APEI) reporting */
#define AER_RECOVER_RING_ORDER 4
#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)

/* One firmware-reported error queued for deferred recovery */
struct aer_recover_entry {
	u8 bus;				/* bus number of error source */
	u8 devfn;			/* device/function of error source */
	u16 domain;			/* PCI segment/domain */
	int severity;			/* AER_CORRECTABLE/NONFATAL/FATAL */
	struct aer_capability_regs *regs; /* snapshot of AER registers */
};

static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
		    AER_RECOVER_RING_SIZE);
/*
 * Workqueue handler: drain aer_recover_ring, printing each firmware-reported
 * error and running recovery for uncorrectable ones.
 */
static void aer_recover_work_func(struct work_struct *work)
{
	struct aer_recover_entry entry;
	struct pci_dev *pdev;

	while (kfifo_get(&aer_recover_ring, &entry)) {
		/* Takes a reference on the device; dropped below */
		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
						   entry.devfn);
		if (!pdev) {
			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
			       entry.domain, entry.bus,
			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
			continue;
		}
		cper_print_aer(pdev, entry.severity, entry.regs);
		/* Correctable errors need no recovery action */
		if (entry.severity == AER_NONFATAL)
			pcie_do_recovery(pdev, pci_channel_io_normal,
					 PCIE_PORT_SERVICE_AER);
		else if (entry.severity == AER_FATAL)
			pcie_do_recovery(pdev, pci_channel_io_frozen,
					 PCIE_PORT_SERVICE_AER);
		pci_dev_put(pdev);
	}
}
/*
 * Mutual exclusion for writers of aer_recover_ring, reader side don't
 * need lock, because there is only one reader and lock is not needed
 * between reader and writer.
 */
static DEFINE_SPINLOCK(aer_recover_ring_lock);
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);

/**
 * aer_recover_queue - queue a firmware-reported AER error for recovery
 * @domain: PCI segment/domain of the error source
 * @bus: bus number of the error source
 * @devfn: device/function of the error source
 * @severity: AER_CORRECTABLE, AER_NONFATAL or AER_FATAL
 * @aer_regs: snapshot of the device's AER capability registers
 *
 * Callable from interrupt/NMI-like contexts: the entry is pushed onto a
 * lock-protected kfifo and handled later by aer_recover_work_func().
 * If the ring is full the error is dropped with a message.
 */
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
		       int severity, struct aer_capability_regs *aer_regs)
{
	struct aer_recover_entry entry = {
		.bus		= bus,
		.devfn		= devfn,
		.domain		= domain,
		.severity	= severity,
		.regs		= aer_regs,
	};

	if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
				&aer_recover_ring_lock))
		schedule_work(&aer_recover_work);
	else
		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
#endif /* CONFIG_ACPI_APEI_PCIEAER */
  912. /**
  913. * aer_get_device_error_info - read error status from dev and store it to info
  914. * @dev: pointer to the device expected to have a error record
  915. * @info: pointer to structure to store the error record
  916. *
  917. * Return 1 on success, 0 on error.
  918. *
  919. * Note that @info is reused among all error devices. Clear fields properly.
  920. */
  921. int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
  922. {
  923. int pos, temp;
  924. /* Must reset in this function */
  925. info->status = 0;
  926. info->tlp_header_valid = 0;
  927. pos = dev->aer_cap;
  928. /* The device might not support AER */
  929. if (!pos)
  930. return 0;
  931. if (info->severity == AER_CORRECTABLE) {
  932. pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
  933. &info->status);
  934. pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
  935. &info->mask);
  936. if (!(info->status & ~info->mask))
  937. return 0;
  938. } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
  939. pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
  940. info->severity == AER_NONFATAL) {
  941. /* Link is still healthy for IO reads */
  942. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
  943. &info->status);
  944. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
  945. &info->mask);
  946. if (!(info->status & ~info->mask))
  947. return 0;
  948. /* Get First Error Pointer */
  949. pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
  950. info->first_error = PCI_ERR_CAP_FEP(temp);
  951. if (info->status & AER_LOG_TLP_MASKS) {
  952. info->tlp_header_valid = 1;
  953. pci_read_config_dword(dev,
  954. pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
  955. pci_read_config_dword(dev,
  956. pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
  957. pci_read_config_dword(dev,
  958. pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
  959. pci_read_config_dword(dev,
  960. pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
  961. }
  962. }
  963. return 1;
  964. }
  965. static inline void aer_process_err_devices(struct aer_err_info *e_info)
  966. {
  967. int i;
  968. /* Report all before handle them, not to lost records by reset etc. */
  969. for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
  970. if (aer_get_device_error_info(e_info->dev[i], e_info))
  971. aer_print_error(e_info->dev[i], e_info);
  972. }
  973. for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
  974. if (aer_get_device_error_info(e_info->dev[i], e_info))
  975. handle_error_source(e_info->dev[i], e_info);
  976. }
  977. }
/**
 * aer_isr_one_error - consume an error detected by root port
 * @rpc: pointer to the root port which holds an error
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct aer_rpc *rpc,
			      struct aer_err_source *e_src)
{
	struct pci_dev *pdev = rpc->rpd;
	struct aer_err_info e_info;

	/* Update this root port's AER statistics for the received error */
	pci_rootport_aer_stats_incr(pdev, e_src);

	/*
	 * There is a possibility that both correctable error and
	 * uncorrectable error being logged. Report correctable error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info.id = ERR_COR_ID(e_src->id);
		e_info.severity = AER_CORRECTABLE;

		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info.multi_error_valid = 1;
		else
			e_info.multi_error_valid = 0;
		aer_print_port_info(pdev, &e_info);

		if (find_source_device(pdev, &e_info))
			aer_process_err_devices(&e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info.id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info.severity = AER_FATAL;
		else
			e_info.severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info.multi_error_valid = 1;
		else
			e_info.multi_error_valid = 0;
		aer_print_port_info(pdev, &e_info);

		if (find_source_device(pdev, &e_info))
			aer_process_err_devices(&e_info);
	}
}
/**
 * aer_isr - consume errors detected by root port
 * @irq: IRQ assigned to Root Port
 * @context: pointer to Root Port's pcie_device
 *
 * Threaded IRQ handler (registered via devm_request_threaded_irq() in
 * aer_probe()): drains the error sources that aer_irq() queued and
 * processes each one.
 */
static irqreturn_t aer_isr(int irq, void *context)
{
	struct pcie_device *dev = (struct pcie_device *)context;
	struct aer_rpc *rpc = get_service_data(dev);
	struct aer_err_source uninitialized_var(e_src);

	/* Spurious wakeup: hard IRQ queued nothing */
	if (kfifo_is_empty(&rpc->aer_fifo))
		return IRQ_NONE;

	while (kfifo_get(&rpc->aer_fifo, &e_src))
		aer_isr_one_error(rpc, &e_src);
	return IRQ_HANDLED;
}
/**
 * aer_irq - Root Port's ISR
 * @irq: IRQ assigned to Root Port
 * @context: pointer to Root Port data structure
 *
 * Invoked when Root Port detects AER messages.
 */
static irqreturn_t aer_irq(int irq, void *context)
{
	struct pcie_device *pdev = (struct pcie_device *)context;
	struct aer_rpc *rpc = get_service_data(pdev);
	struct pci_dev *rp = rpc->rpd;
	struct aer_err_source e_src = {};
	int pos = rp->aer_cap;

	/* Not our interrupt if no error bits are set (the IRQ is shared) */
	pci_read_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, &e_src.status);
	if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
		return IRQ_NONE;

	/* Capture the error source ID before acknowledging the status */
	pci_read_config_dword(rp, pos + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
	/* Root Error Status is RW1C: writing back the read value clears it */
	pci_write_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, e_src.status);

	/* FIFO full: drop this error source, but the IRQ itself was ours */
	if (!kfifo_put(&rpc->aer_fifo, e_src))
		return IRQ_HANDLED;

	/* Defer processing to the threaded handler aer_isr() */
	return IRQ_WAKE_THREAD;
}
  1059. static int set_device_error_reporting(struct pci_dev *dev, void *data)
  1060. {
  1061. bool enable = *((bool *)data);
  1062. int type = pci_pcie_type(dev);
  1063. if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
  1064. (type == PCI_EXP_TYPE_UPSTREAM) ||
  1065. (type == PCI_EXP_TYPE_DOWNSTREAM)) {
  1066. if (enable)
  1067. pci_enable_pcie_error_reporting(dev);
  1068. else
  1069. pci_disable_pcie_error_reporting(dev);
  1070. }
  1071. if (enable)
  1072. pcie_set_ecrc_checking(dev);
  1073. return 0;
  1074. }
  1075. /**
  1076. * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
  1077. * @dev: pointer to root port's pci_dev data structure
  1078. * @enable: true = enable error reporting, false = disable error reporting.
  1079. */
  1080. static void set_downstream_devices_error_reporting(struct pci_dev *dev,
  1081. bool enable)
  1082. {
  1083. set_device_error_reporting(dev, &enable);
  1084. if (!dev->subordinate)
  1085. return;
  1086. pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
  1087. }
/**
 * aer_enable_rootport - enable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when PCIe bus loads AER service driver.
 */
static void aer_enable_rootport(struct aer_rpc *rpc)
{
	struct pci_dev *pdev = rpc->rpd;
	int aer_pos;
	u16 reg16;
	u32 reg32;

	/* Clear PCIe Capability's Device Status */
	pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16);
	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16);

	/* Disable system error generation in response to error messages */
	pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
				   SYSTEM_ERROR_INTR_ON_MESG_MASK);

	aer_pos = pdev->aer_cap;

	/*
	 * Clear stale error status: these status registers are RW1C, so
	 * writing back the value just read clears any set bits.
	 */
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);

	/*
	 * Enable error reporting for the root port device and downstream port
	 * devices.
	 */
	set_downstream_devices_error_reporting(pdev, true);

	/* Enable Root Port's interrupt in response to error messages */
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32);
}
/**
 * aer_disable_rootport - disable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when PCIe bus unloads AER service driver.
 */
static void aer_disable_rootport(struct aer_rpc *rpc)
{
	struct pci_dev *pdev = rpc->rpd;
	u32 reg32;
	int pos;

	/*
	 * Disable error reporting for the root port device and downstream port
	 * devices.
	 */
	set_downstream_devices_error_reporting(pdev, false);

	pos = pdev->aer_cap;

	/* Disable Root's interrupt in response to error messages */
	pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);

	/* Clear Root's error status reg (RW1C: write back what was read) */
	pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
}
/**
 * aer_remove - clean up resources
 * @dev: pointer to the pcie_dev data structure
 *
 * Invoked when PCI Express bus unloads or AER probe fails.
 */
static void aer_remove(struct pcie_device *dev)
{
	struct aer_rpc *rpc = get_service_data(dev);

	/*
	 * The IRQ and the rpc allocation are devm-managed (see aer_probe()),
	 * so only the hardware state needs explicit teardown here.
	 */
	aer_disable_rootport(rpc);
}
  1160. /**
  1161. * aer_probe - initialize resources
  1162. * @dev: pointer to the pcie_dev data structure
  1163. *
  1164. * Invoked when PCI Express bus loads AER service driver.
  1165. */
  1166. static int aer_probe(struct pcie_device *dev)
  1167. {
  1168. int status;
  1169. struct aer_rpc *rpc;
  1170. struct device *device = &dev->device;
  1171. rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL);
  1172. if (!rpc) {
  1173. dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
  1174. return -ENOMEM;
  1175. }
  1176. rpc->rpd = dev->port;
  1177. set_service_data(dev, rpc);
  1178. status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
  1179. IRQF_SHARED, "aerdrv", dev);
  1180. if (status) {
  1181. dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
  1182. dev->irq);
  1183. return status;
  1184. }
  1185. aer_enable_rootport(rpc);
  1186. dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
  1187. return 0;
  1188. }
/**
 * aer_root_reset - reset link on Root Port
 * @dev: pointer to Root Port's pci_dev data structure
 *
 * Invoked by Port Bus driver when performing link reset at Root Port.
 */
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
	u32 reg32;
	int pos;
	int rc;

	pos = dev->aer_cap;

	/* Disable Root's interrupt in response to error messages */
	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);

	rc = pci_bus_error_reset(dev);
	pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");

	/* Clear Root Error Status (RW1C: write back the value just read) */
	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);

	/* Enable Root Port's interrupt in response to error messages */
	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);

	return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
/* AER service driver registered with the PCIe port bus for Root Ports */
static struct pcie_port_service_driver aerdriver = {
	.name		= "aer",
	.port_type	= PCI_EXP_TYPE_ROOT_PORT,
	.service	= PCIE_PORT_SERVICE_AER,
	.probe		= aer_probe,
	.remove		= aer_remove,
	.reset_link	= aer_root_reset,
};
/**
 * pcie_aer_init - register AER root service driver
 *
 * Invoked when AER root service driver is loaded.
 */
int __init pcie_aer_init(void)
{
	/*
	 * Nothing to do when AER is unavailable or when firmware handles
	 * AER first (per aer_acpi_firmware_first()).
	 */
	if (!pci_aer_available() || aer_acpi_firmware_first())
		return -ENXIO;
	return pcie_port_service_register(&aerdriver);
}