aerdrv_core.c
  1. /*
  2. * drivers/pci/pcie/aer/aerdrv_core.c
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * This file implements the core part of PCIe AER. When a PCIe
  9. * error is delivered, an error message will be collected and printed to
  10. * console, then, an error recovery procedure will be executed by following
  11. * the PCI error recovery rules.
  12. *
  13. * Copyright (C) 2006 Intel Corp.
  14. * Tom Long Nguyen (tom.l.nguyen@intel.com)
  15. * Zhang Yanmin (yanmin.zhang@intel.com)
  16. *
  17. */
  18. #include <linux/module.h>
  19. #include <linux/pci.h>
  20. #include <linux/kernel.h>
  21. #include <linux/errno.h>
  22. #include <linux/pm.h>
  23. #include <linux/suspend.h>
  24. #include <linux/delay.h>
  25. #include <linux/slab.h>
  26. #include <linux/kfifo.h>
  27. #include "aerdrv.h"
  28. #define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
  29. PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
  30. int pci_enable_pcie_error_reporting(struct pci_dev *dev)
  31. {
  32. if (pcie_aer_get_firmware_first(dev))
  33. return -EIO;
  34. if (!dev->aer_cap)
  35. return -EIO;
  36. return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
  37. }
  38. EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
  39. int pci_disable_pcie_error_reporting(struct pci_dev *dev)
  40. {
  41. if (pcie_aer_get_firmware_first(dev))
  42. return -EIO;
  43. return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
  44. PCI_EXP_AER_FLAGS);
  45. }
  46. EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
  47. int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
  48. {
  49. int pos;
  50. u32 status;
  51. pos = dev->aer_cap;
  52. if (!pos)
  53. return -EIO;
  54. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
  55. if (status)
  56. pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
  57. return 0;
  58. }
  59. EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
  60. int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
  61. {
  62. int pos;
  63. u32 status;
  64. int port_type;
  65. if (!pci_is_pcie(dev))
  66. return -ENODEV;
  67. pos = dev->aer_cap;
  68. if (!pos)
  69. return -EIO;
  70. port_type = pci_pcie_type(dev);
  71. if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
  72. pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
  73. pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
  74. }
  75. pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
  76. pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
  77. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
  78. pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
  79. return 0;
  80. }
/**
 * pci_aer_init - locate and initialize a device's AER capability
 * @dev: PCI device being initialized
 *
 * Caches the AER extended capability offset in dev->aer_cap (0 when the
 * capability is absent), then clears any stale error status so the device
 * starts in a clean state.  Returns what
 * pci_cleanup_aer_error_status_regs() returns.
 */
int pci_aer_init(struct pci_dev *dev)
{
	dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	return pci_cleanup_aer_error_status_regs(dev);
}
  86. /**
  87. * add_error_device - list device to be handled
  88. * @e_info: pointer to error info
  89. * @dev: pointer to pci_dev to be added
  90. */
  91. static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
  92. {
  93. if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
  94. e_info->dev[e_info->error_dev_num] = dev;
  95. e_info->error_dev_num++;
  96. return 0;
  97. }
  98. return -ENOSPC;
  99. }
  100. /**
  101. * is_error_source - check whether the device is source of reported error
  102. * @dev: pointer to pci_dev to be checked
  103. * @e_info: pointer to reported error info
  104. */
  105. static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
  106. {
  107. int pos;
  108. u32 status, mask;
  109. u16 reg16;
  110. /*
  111. * When bus id is equal to 0, it might be a bad id
  112. * reported by root port.
  113. */
  114. if ((PCI_BUS_NUM(e_info->id) != 0) &&
  115. !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
  116. /* Device ID match? */
  117. if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
  118. return true;
  119. /* Continue id comparing if there is no multiple error */
  120. if (!e_info->multi_error_valid)
  121. return false;
  122. }
  123. /*
  124. * When either
  125. * 1) bus id is equal to 0. Some ports might lose the bus
  126. * id of error source id;
  127. * 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set
  128. * 3) There are multiple errors and prior ID comparing fails;
  129. * We check AER status registers to find possible reporter.
  130. */
  131. if (atomic_read(&dev->enable_cnt) == 0)
  132. return false;
  133. /* Check if AER is enabled */
  134. pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
  135. if (!(reg16 & PCI_EXP_AER_FLAGS))
  136. return false;
  137. pos = dev->aer_cap;
  138. if (!pos)
  139. return false;
  140. /* Check if error is recorded */
  141. if (e_info->severity == AER_CORRECTABLE) {
  142. pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
  143. pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
  144. } else {
  145. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
  146. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
  147. }
  148. if (status & ~mask)
  149. return true;
  150. return false;
  151. }
  152. static int find_device_iter(struct pci_dev *dev, void *data)
  153. {
  154. struct aer_err_info *e_info = (struct aer_err_info *)data;
  155. if (is_error_source(dev, e_info)) {
  156. /* List this device */
  157. if (add_error_device(e_info, dev)) {
  158. /* We cannot handle more... Stop iteration */
  159. /* TODO: Should print error message here? */
  160. return 1;
  161. }
  162. /* If there is only a single error, stop iteration */
  163. if (!e_info->multi_error_valid)
  164. return 1;
  165. }
  166. return 0;
  167. }
  168. /**
  169. * find_source_device - search through device hierarchy for source device
  170. * @parent: pointer to Root Port pci_dev data structure
  171. * @e_info: including detailed error information such like id
  172. *
  173. * Return true if found.
  174. *
  175. * Invoked by DPC when error is detected at the Root Port.
  176. * Caller of this function must set id, severity, and multi_error_valid of
  177. * struct aer_err_info pointed by @e_info properly. This function must fill
  178. * e_info->error_dev_num and e_info->dev[], based on the given information.
  179. */
  180. static bool find_source_device(struct pci_dev *parent,
  181. struct aer_err_info *e_info)
  182. {
  183. struct pci_dev *dev = parent;
  184. int result;
  185. /* Must reset in this function */
  186. e_info->error_dev_num = 0;
  187. /* Is Root Port an agent that sends error message? */
  188. result = find_device_iter(dev, e_info);
  189. if (result)
  190. return true;
  191. pci_walk_bus(parent->subordinate, find_device_iter, e_info);
  192. if (!e_info->error_dev_num) {
  193. dev_printk(KERN_DEBUG, &parent->dev,
  194. "can't find device of ID%04x\n",
  195. e_info->id);
  196. return false;
  197. }
  198. return true;
  199. }
/*
 * pci_walk_bus() callback for the "error_detected" broadcast: put @dev
 * into the broadcast error state, invoke its driver's error_detected()
 * callback if one exists, and merge the returned vote into
 * result_data->result.  Always returns 0 so the walk continues.
 */
static int report_error_detected(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	dev->error_state = result_data->state;
	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->error_detected) {
		if (result_data->state == pci_channel_io_frozen &&
			dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
			/*
			 * In case of fatal recovery, if one of down-
			 * stream device has no driver. We might be
			 * unable to recover because a later insmod
			 * of a driver for this device is unaware of
			 * its hw state.
			 */
			dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
				   dev->driver ?
				   "no AER-aware driver" : "no driver");
		}

		/*
		 * If there's any device in the subtree that does not
		 * have an error_detected callback, returning
		 * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
		 * the subsequent mmio_enabled/slot_reset/resume
		 * callbacks of "any" device in the subtree. All the
		 * devices in the subtree are left in the error state
		 * without recovery.
		 */
		if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
		else
			vote = PCI_ERS_RESULT_NONE;
	} else {
		err_handler = dev->driver->err_handler;
		vote = err_handler->error_detected(dev, result_data->state);
	}

	result_data->result = merge_result(result_data->result, vote);
	device_unlock(&dev->dev);
	return 0;
}
  245. static int report_mmio_enabled(struct pci_dev *dev, void *data)
  246. {
  247. pci_ers_result_t vote;
  248. const struct pci_error_handlers *err_handler;
  249. struct aer_broadcast_data *result_data;
  250. result_data = (struct aer_broadcast_data *) data;
  251. device_lock(&dev->dev);
  252. if (!dev->driver ||
  253. !dev->driver->err_handler ||
  254. !dev->driver->err_handler->mmio_enabled)
  255. goto out;
  256. err_handler = dev->driver->err_handler;
  257. vote = err_handler->mmio_enabled(dev);
  258. result_data->result = merge_result(result_data->result, vote);
  259. out:
  260. device_unlock(&dev->dev);
  261. return 0;
  262. }
  263. static int report_slot_reset(struct pci_dev *dev, void *data)
  264. {
  265. pci_ers_result_t vote;
  266. const struct pci_error_handlers *err_handler;
  267. struct aer_broadcast_data *result_data;
  268. result_data = (struct aer_broadcast_data *) data;
  269. device_lock(&dev->dev);
  270. if (!dev->driver ||
  271. !dev->driver->err_handler ||
  272. !dev->driver->err_handler->slot_reset)
  273. goto out;
  274. err_handler = dev->driver->err_handler;
  275. vote = err_handler->slot_reset(dev);
  276. result_data->result = merge_result(result_data->result, vote);
  277. out:
  278. device_unlock(&dev->dev);
  279. return 0;
  280. }
  281. static int report_resume(struct pci_dev *dev, void *data)
  282. {
  283. const struct pci_error_handlers *err_handler;
  284. device_lock(&dev->dev);
  285. dev->error_state = pci_channel_io_normal;
  286. if (!dev->driver ||
  287. !dev->driver->err_handler ||
  288. !dev->driver->err_handler->resume)
  289. goto out;
  290. err_handler = dev->driver->err_handler;
  291. err_handler->resume(dev);
  292. out:
  293. device_unlock(&dev->dev);
  294. return 0;
  295. }
/**
 * broadcast_error_message - handle message broadcast to downstream drivers
 * @dev: pointer to from where in a hierarchy message is broadcasted down
 * @state: error state
 * @error_mesg: message to print
 * @cb: callback to be broadcasted
 *
 * Invoked during error recovery process. Once being invoked, the content
 * of error severity will be broadcasted to all downstream drivers in a
 * hierarchy in question.
 *
 * The starting vote depends on which phase @cb implements: error_detected
 * starts at CAN_RECOVER, the later phases start at RECOVERED, and each
 * device's vote is merged in by the callback itself.  Returns the merged
 * result.
 */
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
	enum pci_channel_state state,
	char *error_mesg,
	int (*cb)(struct pci_dev *, void *))
{
	struct aer_broadcast_data result_data;

	dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
	result_data.state = state;
	if (cb == report_error_detected)
		result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
	else
		result_data.result = PCI_ERS_RESULT_RECOVERED;

	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		/*
		 * If the error is reported by a bridge, we think this error
		 * is related to the downstream link of the bridge, so we
		 * do error recovery on all subordinates of the bridge instead
		 * of the bridge and clear the error status of the bridge.
		 */
		if (cb == report_error_detected)
			dev->error_state = state;
		pci_walk_bus(dev->subordinate, cb, &result_data);
		if (cb == report_resume) {
			pci_cleanup_aer_uncorrect_error_status(dev);
			dev->error_state = pci_channel_io_normal;
		}
	} else {
		/*
		 * If the error is reported by an end point, we think this
		 * error is related to the upstream link of the end point.
		 * Note: pci_walk_bus(dev->bus, ...) visits the siblings on
		 * the same bus as well as @dev itself.
		 */
		pci_walk_bus(dev->bus, cb, &result_data);
	}

	return result_data.result;
}
/**
 * default_reset_link - default reset function
 * @dev: pointer to pci_dev data structure
 *
 * Invoked when performing link reset on a Downstream Port or a
 * Root Port with no aer driver.
 *
 * Performs a secondary bus reset on @dev's downstream link and always
 * reports success (PCI_ERS_RESULT_RECOVERED).
 */
static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
	pci_reset_bridge_secondary_bus(dev);
	dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
	return PCI_ERS_RESULT_RECOVERED;
}
  355. static int find_aer_service_iter(struct device *device, void *data)
  356. {
  357. struct pcie_port_service_driver *service_driver, **drv;
  358. drv = (struct pcie_port_service_driver **) data;
  359. if (device->bus == &pcie_port_bus_type && device->driver) {
  360. service_driver = to_service_driver(device->driver);
  361. if (service_driver->service == PCIE_PORT_SERVICE_AER) {
  362. *drv = service_driver;
  363. return 1;
  364. }
  365. }
  366. return 0;
  367. }
  368. static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
  369. {
  370. struct pcie_port_service_driver *drv = NULL;
  371. device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
  372. return drv;
  373. }
  374. static pci_ers_result_t reset_link(struct pci_dev *dev)
  375. {
  376. struct pci_dev *udev;
  377. pci_ers_result_t status;
  378. struct pcie_port_service_driver *driver;
  379. if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
  380. /* Reset this port for all subordinates */
  381. udev = dev;
  382. } else {
  383. /* Reset the upstream component (likely downstream port) */
  384. udev = dev->bus->self;
  385. }
  386. /* Use the aer driver of the component firstly */
  387. driver = find_aer_service(udev);
  388. if (driver && driver->reset_link) {
  389. status = driver->reset_link(udev);
  390. } else if (udev->has_secondary_link) {
  391. status = default_reset_link(udev);
  392. } else {
  393. dev_printk(KERN_DEBUG, &dev->dev,
  394. "no link-reset support at upstream device %s\n",
  395. pci_name(udev));
  396. return PCI_ERS_RESULT_DISCONNECT;
  397. }
  398. if (status != PCI_ERS_RESULT_RECOVERED) {
  399. dev_printk(KERN_DEBUG, &dev->dev,
  400. "link reset at upstream device %s failed\n",
  401. pci_name(udev));
  402. return PCI_ERS_RESULT_DISCONNECT;
  403. }
  404. return status;
  405. }
/**
 * do_recovery - handle nonfatal/fatal error recovery process
 * @dev: pointer to a pci_dev data structure of agent detecting an error
 * @severity: error severity type (AER_FATAL or AER_NONFATAL)
 *
 * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast
 * error detected message to all downstream drivers within a hierarchy in
 * question and return the returned code.
 *
 * Phases: error_detected -> (link reset, fatal only) -> mmio_enabled
 * (only if drivers voted CAN_RECOVER) -> slot_reset (only if drivers
 * voted NEED_RESET) -> resume.  Any non-RECOVERED outcome aborts with a
 * "recovery failed" message.
 */
static void do_recovery(struct pci_dev *dev, int severity)
{
	pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
	enum pci_channel_state state;

	/* Fatal errors freeze I/O on the channel; nonfatal ones do not */
	if (severity == AER_FATAL)
		state = pci_channel_io_frozen;
	else
		state = pci_channel_io_normal;

	status = broadcast_error_message(dev,
			state,
			"error_detected",
			report_error_detected);

	/* A fatal error requires the upstream link to be reset first */
	if (severity == AER_FATAL) {
		result = reset_link(dev);
		if (result != PCI_ERS_RESULT_RECOVERED)
			goto failed;
	}

	if (status == PCI_ERS_RESULT_CAN_RECOVER)
		status = broadcast_error_message(dev,
				state,
				"mmio_enabled",
				report_mmio_enabled);

	if (status == PCI_ERS_RESULT_NEED_RESET) {
		/*
		 * TODO: Should call platform-specific
		 * functions to reset slot before calling
		 * drivers' slot_reset callbacks?
		 */
		status = broadcast_error_message(dev,
				state,
				"slot_reset",
				report_slot_reset);
	}

	if (status != PCI_ERS_RESULT_RECOVERED)
		goto failed;

	broadcast_error_message(dev,
			state,
			"resume",
			report_resume);

	dev_info(&dev->dev, "AER: Device recovery successful\n");
	return;

failed:
	/* TODO: Should kernel panic here? */
	dev_info(&dev->dev, "AER: Device recovery failed\n");
}
  460. /**
  461. * handle_error_source - handle logging error into an event log
  462. * @aerdev: pointer to pcie_device data structure of the root port
  463. * @dev: pointer to pci_dev data structure of error source device
  464. * @info: comprehensive error information
  465. *
  466. * Invoked when an error being detected by Root Port.
  467. */
  468. static void handle_error_source(struct pcie_device *aerdev,
  469. struct pci_dev *dev,
  470. struct aer_err_info *info)
  471. {
  472. int pos;
  473. if (info->severity == AER_CORRECTABLE) {
  474. /*
  475. * Correctable error does not need software intervention.
  476. * No need to go through error recovery process.
  477. */
  478. pos = dev->aer_cap;
  479. if (pos)
  480. pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
  481. info->status);
  482. } else
  483. do_recovery(dev, info->severity);
  484. }
#ifdef CONFIG_ACPI_APEI_PCIEAER
static void aer_recover_work_func(struct work_struct *work);

/* Ring buffer of pending APEI-reported errors: 2^4 = 16 entries */
#define AER_RECOVER_RING_ORDER		4
#define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)

/* One firmware-reported AER event queued for deferred recovery */
struct aer_recover_entry {
	u8	bus;		/* bus number of the error source */
	u8	devfn;		/* device/function of the error source */
	u16	domain;		/* PCI segment/domain number */
	int	severity;	/* AER_CORRECTABLE/AER_NONFATAL/AER_FATAL */
	struct aer_capability_regs *regs;	/* AER register snapshot */
};

static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
		    AER_RECOVER_RING_SIZE);

/*
 * Mutual exclusion for writers of aer_recover_ring, reader side don't
 * need lock, because there is only one reader and lock is not needed
 * between reader and writer.
 */
static DEFINE_SPINLOCK(aer_recover_ring_lock);
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
/*
 * aer_recover_queue - queue a firmware-reported AER event for recovery
 * @domain: PCI segment/domain of the error source
 * @bus: bus number of the error source
 * @devfn: device/function of the error source
 * @severity: AER severity of the event
 * @aer_regs: snapshot of the source's AER capability registers
 *
 * Called from the APEI/GHES path (may be in IRQ context).  The entry is
 * pushed onto aer_recover_ring and actual recovery is deferred to
 * aer_recover_work_func().  On ring overflow the event is dropped with an
 * error message.
 */
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
		       int severity, struct aer_capability_regs *aer_regs)
{
	unsigned long flags;
	struct aer_recover_entry entry = {
		.bus		= bus,
		.devfn		= devfn,
		.domain		= domain,
		.severity	= severity,
		.regs		= aer_regs,
	};

	/* Only the writer side needs the lock; see aer_recover_ring_lock */
	spin_lock_irqsave(&aer_recover_ring_lock, flags);
	if (kfifo_put(&aer_recover_ring, entry))
		schedule_work(&aer_recover_work);
	else
		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
/*
 * Work function draining aer_recover_ring: for each queued entry, look up
 * the pci_dev, print the firmware-captured AER registers and run the
 * normal recovery process.  Sole reader of the kfifo, so no lock is
 * needed here (see comment at aer_recover_ring_lock).
 */
static void aer_recover_work_func(struct work_struct *work)
{
	struct aer_recover_entry entry;
	struct pci_dev *pdev;

	while (kfifo_get(&aer_recover_ring, &entry)) {
		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
						   entry.devfn);
		if (!pdev) {
			/* Device may have been removed since the event */
			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
			       entry.domain, entry.bus,
			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
			continue;
		}
		cper_print_aer(pdev, entry.severity, entry.regs);
		do_recovery(pdev, entry.severity);
		pci_dev_put(pdev);	/* drop ref from pci_get_domain_bus_and_slot() */
	}
}
  543. #endif
  544. /**
  545. * get_device_error_info - read error status from dev and store it to info
  546. * @dev: pointer to the device expected to have a error record
  547. * @info: pointer to structure to store the error record
  548. *
  549. * Return 1 on success, 0 on error.
  550. *
  551. * Note that @info is reused among all error devices. Clear fields properly.
  552. */
  553. static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
  554. {
  555. int pos, temp;
  556. /* Must reset in this function */
  557. info->status = 0;
  558. info->tlp_header_valid = 0;
  559. pos = dev->aer_cap;
  560. /* The device might not support AER */
  561. if (!pos)
  562. return 1;
  563. if (info->severity == AER_CORRECTABLE) {
  564. pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
  565. &info->status);
  566. pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
  567. &info->mask);
  568. if (!(info->status & ~info->mask))
  569. return 0;
  570. } else if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
  571. info->severity == AER_NONFATAL) {
  572. /* Link is still healthy for IO reads */
  573. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
  574. &info->status);
  575. pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
  576. &info->mask);
  577. if (!(info->status & ~info->mask))
  578. return 0;
  579. /* Get First Error Pointer */
  580. pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
  581. info->first_error = PCI_ERR_CAP_FEP(temp);
  582. if (info->status & AER_LOG_TLP_MASKS) {
  583. info->tlp_header_valid = 1;
  584. pci_read_config_dword(dev,
  585. pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
  586. pci_read_config_dword(dev,
  587. pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
  588. pci_read_config_dword(dev,
  589. pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
  590. pci_read_config_dword(dev,
  591. pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
  592. }
  593. }
  594. return 1;
  595. }
/*
 * Print then handle every device recorded in e_info->dev[].  The two
 * passes are deliberate: reporting happens first so that a reset performed
 * while handling one device cannot wipe the error records of the others
 * before they are logged.
 */
static inline void aer_process_err_devices(struct pcie_device *p_device,
					   struct aer_err_info *e_info)
{
	int i;

	/* Report all before handle them, not to lost records by reset etc. */
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (get_device_error_info(e_info->dev[i], e_info))
			aer_print_error(e_info->dev[i], e_info);
	}
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (get_device_error_info(e_info->dev[i], e_info))
			handle_error_source(p_device, e_info->dev[i], e_info);
	}
}
/**
 * aer_isr_one_error - consume an error detected by root port
 * @p_device: pointer to error root port service device
 * @e_src: pointer to an error source (Root Error Status + source ID)
 *
 * Decodes the Root Error Status bits in @e_src into the shared
 * rpc->e_info, then searches for and processes the source device(s).
 */
static void aer_isr_one_error(struct pcie_device *p_device,
			      struct aer_err_source *e_src)
{
	struct aer_rpc *rpc = get_service_data(p_device);
	struct aer_err_info *e_info = &rpc->e_info;

	/*
	 * There is a possibility that both correctable error and
	 * uncorrectable error being logged. Report correctable error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info->id = ERR_COR_ID(e_src->id);
		e_info->severity = AER_CORRECTABLE;

		/* Multiple-error bit widens the search beyond the ID match */
		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info->id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info->severity = AER_FATAL;
		else
			e_info->severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}
}
  650. /**
  651. * get_e_source - retrieve an error source
  652. * @rpc: pointer to the root port which holds an error
  653. * @e_src: pointer to store retrieved error source
  654. *
  655. * Return 1 if an error source is retrieved, otherwise 0.
  656. *
  657. * Invoked by DPC handler to consume an error.
  658. */
  659. static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
  660. {
  661. unsigned long flags;
  662. /* Lock access to Root error producer/consumer index */
  663. spin_lock_irqsave(&rpc->e_lock, flags);
  664. if (rpc->prod_idx == rpc->cons_idx) {
  665. spin_unlock_irqrestore(&rpc->e_lock, flags);
  666. return 0;
  667. }
  668. *e_src = rpc->e_sources[rpc->cons_idx];
  669. rpc->cons_idx++;
  670. if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
  671. rpc->cons_idx = 0;
  672. spin_unlock_irqrestore(&rpc->e_lock, flags);
  673. return 1;
  674. }
/**
 * aer_isr - consume errors detected by root port
 * @work: definition of this work item
 *
 * Invoked, as DPC, when root port records new detected error.  Drains
 * every queued error source under rpc_mutex so only one error-handling
 * pass runs at a time.
 */
void aer_isr(struct work_struct *work)
{
	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
	struct pcie_device *p_device = rpc->rpd;
	/* uninitialized_var(): e_src is always written by get_e_source()
	 * before use; the macro only silences a false compiler warning. */
	struct aer_err_source uninitialized_var(e_src);

	mutex_lock(&rpc->rpc_mutex);
	while (get_e_source(rpc, &e_src))
		aer_isr_one_error(p_device, &e_src);
	mutex_unlock(&rpc->rpc_mutex);
}