pciehp_hpc.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * PCI Express PCI Hot Plug Driver
  4. *
  5. * Copyright (C) 1995,2001 Compaq Computer Corporation
  6. * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
  7. * Copyright (C) 2001 IBM Corp.
  8. * Copyright (C) 2003-2004 Intel Corporation
  9. *
  10. * All rights reserved.
  11. *
  12. * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/module.h>
  16. #include <linux/types.h>
  17. #include <linux/signal.h>
  18. #include <linux/jiffies.h>
  19. #include <linux/kthread.h>
  20. #include <linux/pci.h>
  21. #include <linux/pm_runtime.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/time.h>
  24. #include <linux/slab.h>
  25. #include "../pci.h"
  26. #include "pciehp.h"
/* Return the PCIe port device backing this hotplug controller. */
static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
{
	return ctrl->pcie->port;
}

/* Forward declarations: hardirq handler, IRQ thread, poll-mode kthread */
static irqreturn_t pciehp_isr(int irq, void *dev_id);
static irqreturn_t pciehp_ist(int irq, void *dev_id);
static int pciehp_poll(void *data);
  34. static inline int pciehp_request_irq(struct controller *ctrl)
  35. {
  36. int retval, irq = ctrl->pcie->irq;
  37. if (pciehp_poll_mode) {
  38. ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
  39. "pciehp_poll-%s",
  40. slot_name(ctrl->slot));
  41. return PTR_ERR_OR_ZERO(ctrl->poll_thread);
  42. }
  43. /* Installs the interrupt handler */
  44. retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
  45. IRQF_SHARED, MY_NAME, ctrl);
  46. if (retval)
  47. ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
  48. irq);
  49. return retval;
  50. }
  51. static inline void pciehp_free_irq(struct controller *ctrl)
  52. {
  53. if (pciehp_poll_mode)
  54. kthread_stop(ctrl->poll_thread);
  55. else
  56. free_irq(ctrl->pcie->irq, ctrl);
  57. }
  58. static int pcie_poll_cmd(struct controller *ctrl, int timeout)
  59. {
  60. struct pci_dev *pdev = ctrl_dev(ctrl);
  61. u16 slot_status;
  62. while (true) {
  63. pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  64. if (slot_status == (u16) ~0) {
  65. ctrl_info(ctrl, "%s: no response from device\n",
  66. __func__);
  67. return 0;
  68. }
  69. if (slot_status & PCI_EXP_SLTSTA_CC) {
  70. pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
  71. PCI_EXP_SLTSTA_CC);
  72. return 1;
  73. }
  74. if (timeout < 0)
  75. break;
  76. msleep(10);
  77. timeout -= 10;
  78. }
  79. return 0; /* timeout */
  80. }
/*
 * Wait until a previously issued Slot Control command has completed so
 * that a new command may be written. Uses the Command Completed
 * interrupt when enabled, otherwise falls back to polling. Both call
 * sites (pcie_do_write_cmd()) hold ctrl->ctrl_lock.
 */
static void pcie_wait_cmd(struct controller *ctrl)
{
	/* Poll mode gets a longer budget to cover the polling interval */
	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
	unsigned long duration = msecs_to_jiffies(msecs);
	unsigned long cmd_timeout = ctrl->cmd_started + duration;
	unsigned long now, timeout;
	int rc;

	/*
	 * If the controller does not generate notifications for command
	 * completions, we never need to wait between writes.
	 */
	if (NO_CMD_CMPL(ctrl))
		return;

	/* Nothing outstanding: previous command already completed */
	if (!ctrl->cmd_busy)
		return;

	/*
	 * Even if the command has already timed out, we want to call
	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
	 */
	now = jiffies;
	if (time_before_eq(cmd_timeout, now))
		timeout = 1;
	else
		timeout = cmd_timeout - now;

	/* Sleep on the waitqueue only if completion interrupts can fire */
	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
	else
		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));

	if (!rc)
		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
			  ctrl->slot_ctrl,
			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
/*
 * Slot Control bits that must change for affected controllers (see
 * quirk_cmd_compl()) to generate a Command Completed event.
 */
#define CC_ERRATUM_MASK		(PCI_EXP_SLTCTL_PCC |	\
				 PCI_EXP_SLTCTL_PIC |	\
				 PCI_EXP_SLTCTL_AIC |	\
				 PCI_EXP_SLTCTL_EIC)

/*
 * Read-modify-write the Slot Control register under ctrl->ctrl_lock:
 * the bits selected by @mask are replaced by the corresponding bits of
 * @cmd. If @wait is true, block until the controller reports the
 * command complete.
 */
static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
			      u16 mask, bool wait)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl_orig, slot_ctrl;

	mutex_lock(&ctrl->ctrl_lock);

	/*
	 * Always wait for any previous command that might still be in progress
	 */
	pcie_wait_cmd(ctrl);

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	if (slot_ctrl == (u16) ~0) {
		/* All-ones read: device gone or inaccessible */
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		goto out;
	}

	slot_ctrl_orig = slot_ctrl;
	slot_ctrl &= ~mask;
	slot_ctrl |= (cmd & mask);
	ctrl->cmd_busy = 1;
	/* make cmd_busy visible before the write can trigger the CC IRQ */
	smp_mb();
	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
	ctrl->cmd_started = jiffies;
	ctrl->slot_ctrl = slot_ctrl;

	/*
	 * Controllers with the Intel CF118 and similar errata advertise
	 * Command Completed support, but they only set Command Completed
	 * if we change the "Control" bits for power, power indicator,
	 * attention indicator, or interlock. If we only change the
	 * "Enable" bits, they never set the Command Completed bit.
	 */
	if (pdev->broken_cmd_compl &&
	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
		ctrl->cmd_busy = 0;

	/*
	 * Optionally wait for the hardware to be ready for a new command,
	 * indicating completion of the above issued command.
	 */
	if (wait)
		pcie_wait_cmd(ctrl);

out:
	mutex_unlock(&ctrl->ctrl_lock);
}
/**
 * pcie_write_cmd - Issue controller command
 * @ctrl: controller to which the command is issued
 * @cmd: command value written to slot control register
 * @mask: bitmask of slot control register to be modified
 *
 * Blocks until the controller signals completion of the command.
 */
static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, true);
}

/* Same as above without waiting for the hardware to latch */
static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, false);
}
  176. bool pciehp_check_link_active(struct controller *ctrl)
  177. {
  178. struct pci_dev *pdev = ctrl_dev(ctrl);
  179. u16 lnk_status;
  180. bool ret;
  181. pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
  182. ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
  183. if (ret)
  184. ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
  185. return ret;
  186. }
  187. static void pcie_wait_link_active(struct controller *ctrl)
  188. {
  189. struct pci_dev *pdev = ctrl_dev(ctrl);
  190. pcie_wait_for_link(pdev, true);
  191. }
  192. static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
  193. {
  194. u32 l;
  195. int count = 0;
  196. int delay = 1000, step = 20;
  197. bool found = false;
  198. do {
  199. found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
  200. count++;
  201. if (found)
  202. break;
  203. msleep(step);
  204. delay -= step;
  205. } while (delay > 0);
  206. if (count > 1 && pciehp_debug)
  207. printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
  208. pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
  209. PCI_FUNC(devfn), count, step, l);
  210. return found;
  211. }
/*
 * Wait for the link to come up after slot power-on, then verify that
 * link training finished and that a device answers config accesses on
 * the secondary bus. Returns 0 on success, -1 on link training error
 * or when no device responds.
 */
int pciehp_check_link_status(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	bool found;
	u16 lnk_status;

	/*
	 * Data Link Layer Link Active Reporting must be capable for
	 * hot-plug capable downstream port. But old controller might
	 * not implement it. In this case, we wait for 1000 ms.
	 */
	if (ctrl->link_active_reporting)
		pcie_wait_link_active(ctrl);
	else
		msleep(1000);

	/* wait 100ms before read pci conf, and try in 1s */
	msleep(100);
	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
				  PCI_DEVFN(0, 0));

	/* ignore link or presence changes up to this point */
	if (found)
		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
			   &ctrl->pending_events);

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
	/* Link must have trained and negotiated a nonzero width */
	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
		ctrl_err(ctrl, "link training error: status %#06x\n",
			 lnk_status);
		return -1;
	}

	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);

	if (!found)
		return -1;

	return 0;
}
  247. static int __pciehp_link_set(struct controller *ctrl, bool enable)
  248. {
  249. struct pci_dev *pdev = ctrl_dev(ctrl);
  250. u16 lnk_ctrl;
  251. pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
  252. if (enable)
  253. lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
  254. else
  255. lnk_ctrl |= PCI_EXP_LNKCTL_LD;
  256. pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
  257. ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
  258. return 0;
  259. }
/* Bring up the link by clearing Link Disable. */
static int pciehp_link_enable(struct controller *ctrl)
{
	return __pciehp_link_set(ctrl, true);
}
  264. int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
  265. u8 *status)
  266. {
  267. struct slot *slot = hotplug_slot->private;
  268. struct pci_dev *pdev = ctrl_dev(slot->ctrl);
  269. u16 slot_ctrl;
  270. pci_config_pm_runtime_get(pdev);
  271. pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
  272. pci_config_pm_runtime_put(pdev);
  273. *status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
  274. return 0;
  275. }
  276. void pciehp_get_attention_status(struct slot *slot, u8 *status)
  277. {
  278. struct controller *ctrl = slot->ctrl;
  279. struct pci_dev *pdev = ctrl_dev(ctrl);
  280. u16 slot_ctrl;
  281. pci_config_pm_runtime_get(pdev);
  282. pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
  283. pci_config_pm_runtime_put(pdev);
  284. ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
  285. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
  286. switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
  287. case PCI_EXP_SLTCTL_ATTN_IND_ON:
  288. *status = 1; /* On */
  289. break;
  290. case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
  291. *status = 2; /* Blink */
  292. break;
  293. case PCI_EXP_SLTCTL_ATTN_IND_OFF:
  294. *status = 0; /* Off */
  295. break;
  296. default:
  297. *status = 0xFF;
  298. break;
  299. }
  300. }
  301. void pciehp_get_power_status(struct slot *slot, u8 *status)
  302. {
  303. struct controller *ctrl = slot->ctrl;
  304. struct pci_dev *pdev = ctrl_dev(ctrl);
  305. u16 slot_ctrl;
  306. pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
  307. ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
  308. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
  309. switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
  310. case PCI_EXP_SLTCTL_PWR_ON:
  311. *status = 1; /* On */
  312. break;
  313. case PCI_EXP_SLTCTL_PWR_OFF:
  314. *status = 0; /* Off */
  315. break;
  316. default:
  317. *status = 0xFF;
  318. break;
  319. }
  320. }
  321. void pciehp_get_latch_status(struct slot *slot, u8 *status)
  322. {
  323. struct pci_dev *pdev = ctrl_dev(slot->ctrl);
  324. u16 slot_status;
  325. pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  326. *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
  327. }
  328. void pciehp_get_adapter_status(struct slot *slot, u8 *status)
  329. {
  330. struct pci_dev *pdev = ctrl_dev(slot->ctrl);
  331. u16 slot_status;
  332. pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  333. *status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
  334. }
  335. int pciehp_query_power_fault(struct slot *slot)
  336. {
  337. struct pci_dev *pdev = ctrl_dev(slot->ctrl);
  338. u16 slot_status;
  339. pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  340. return !!(slot_status & PCI_EXP_SLTSTA_PFD);
  341. }
  342. int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
  343. u8 status)
  344. {
  345. struct slot *slot = hotplug_slot->private;
  346. struct controller *ctrl = slot->ctrl;
  347. struct pci_dev *pdev = ctrl_dev(ctrl);
  348. pci_config_pm_runtime_get(pdev);
  349. pcie_write_cmd_nowait(ctrl, status << 6,
  350. PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
  351. pci_config_pm_runtime_put(pdev);
  352. return 0;
  353. }
  354. void pciehp_set_attention_status(struct slot *slot, u8 value)
  355. {
  356. struct controller *ctrl = slot->ctrl;
  357. u16 slot_cmd;
  358. if (!ATTN_LED(ctrl))
  359. return;
  360. switch (value) {
  361. case 0: /* turn off */
  362. slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF;
  363. break;
  364. case 1: /* turn on */
  365. slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON;
  366. break;
  367. case 2: /* turn blink */
  368. slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK;
  369. break;
  370. default:
  371. return;
  372. }
  373. pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
  374. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  375. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
  376. }
  377. void pciehp_green_led_on(struct slot *slot)
  378. {
  379. struct controller *ctrl = slot->ctrl;
  380. if (!PWR_LED(ctrl))
  381. return;
  382. pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
  383. PCI_EXP_SLTCTL_PIC);
  384. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  385. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
  386. PCI_EXP_SLTCTL_PWR_IND_ON);
  387. }
  388. void pciehp_green_led_off(struct slot *slot)
  389. {
  390. struct controller *ctrl = slot->ctrl;
  391. if (!PWR_LED(ctrl))
  392. return;
  393. pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
  394. PCI_EXP_SLTCTL_PIC);
  395. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  396. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
  397. PCI_EXP_SLTCTL_PWR_IND_OFF);
  398. }
  399. void pciehp_green_led_blink(struct slot *slot)
  400. {
  401. struct controller *ctrl = slot->ctrl;
  402. if (!PWR_LED(ctrl))
  403. return;
  404. pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
  405. PCI_EXP_SLTCTL_PIC);
  406. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  407. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
  408. PCI_EXP_SLTCTL_PWR_IND_BLINK);
  409. }
  410. int pciehp_power_on_slot(struct slot *slot)
  411. {
  412. struct controller *ctrl = slot->ctrl;
  413. struct pci_dev *pdev = ctrl_dev(ctrl);
  414. u16 slot_status;
  415. int retval;
  416. /* Clear power-fault bit from previous power failures */
  417. pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  418. if (slot_status & PCI_EXP_SLTSTA_PFD)
  419. pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
  420. PCI_EXP_SLTSTA_PFD);
  421. ctrl->power_fault_detected = 0;
  422. pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
  423. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  424. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
  425. PCI_EXP_SLTCTL_PWR_ON);
  426. retval = pciehp_link_enable(ctrl);
  427. if (retval)
  428. ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
  429. return retval;
  430. }
  431. void pciehp_power_off_slot(struct slot *slot)
  432. {
  433. struct controller *ctrl = slot->ctrl;
  434. pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
  435. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  436. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
  437. PCI_EXP_SLTCTL_PWR_OFF);
  438. }
/*
 * Hardirq handler: read and acknowledge Slot Status events, then defer
 * actual handling to the IRQ thread (pciehp_ist()). Returns IRQ_NONE
 * when the interrupt was not ours or the port is inaccessible.
 */
static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	struct device *parent = pdev->dev.parent;
	u16 status, events;

	/*
	 * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
	 */
	if (pdev->current_state == PCI_D3cold)
		return IRQ_NONE;

	/*
	 * Keep the port accessible by holding a runtime PM ref on its parent.
	 * Defer resume of the parent to the IRQ thread if it's suspended.
	 * Mask the interrupt until then.
	 */
	if (parent) {
		pm_runtime_get_noresume(parent);
		if (!pm_runtime_active(parent)) {
			pm_runtime_put(parent);
			disable_irq_nosync(irq);
			atomic_or(RERUN_ISR, &ctrl->pending_events);
			return IRQ_WAKE_THREAD;
		}
	}

	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
	if (status == (u16) ~0) {
		/* All-ones read: device gone or config space inaccessible */
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}

	/*
	 * Slot Status contains plain status bits as well as event
	 * notification bits; right now we only want the event bits.
	 */
	events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
			   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
			   PCI_EXP_SLTSTA_DLLSC);

	/*
	 * If we've already reported a power fault, don't report it again
	 * until we've done something to handle it.
	 */
	if (ctrl->power_fault_detected)
		events &= ~PCI_EXP_SLTSTA_PFD;

	if (!events) {
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}

	/* Acknowledge the events (RW1C bits) before releasing the PM ref */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
	if (parent)
		pm_runtime_put(parent);

	/*
	 * Command Completed notifications are not deferred to the
	 * IRQ thread because it may be waiting for their arrival.
	 */
	if (events & PCI_EXP_SLTSTA_CC) {
		ctrl->cmd_busy = 0;
		/* pairs with the barrier in pcie_do_write_cmd() */
		smp_mb();
		wake_up(&ctrl->queue);
		if (events == PCI_EXP_SLTSTA_CC)
			return IRQ_HANDLED;

		events &= ~PCI_EXP_SLTSTA_CC;
	}

	if (pdev->ignore_hotplug) {
		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
		return IRQ_HANDLED;
	}

	/* Save pending events for consumption by IRQ thread. */
	atomic_or(events, &ctrl->pending_events);
	return IRQ_WAKE_THREAD;
}
/*
 * IRQ thread: consume the events collected by pciehp_isr() and invoke
 * the corresponding slot handlers (button press, power fault, disable
 * request, presence/link change).
 */
static irqreturn_t pciehp_ist(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	struct slot *slot = ctrl->slot;
	irqreturn_t ret;
	u32 events;

	pci_config_pm_runtime_get(pdev);

	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
		ret = pciehp_isr(irq, dev_id);
		/* undo the disable_irq_nosync() done in pciehp_isr() */
		enable_irq(irq);
		if (ret != IRQ_WAKE_THREAD) {
			pci_config_pm_runtime_put(pdev);
			return ret;
		}
	}

	/* let a concurrent hardirq finish updating pending_events */
	synchronize_hardirq(irq);
	events = atomic_xchg(&ctrl->pending_events, 0);
	if (!events) {
		pci_config_pm_runtime_put(pdev);
		return IRQ_NONE;
	}

	/* Check Attention Button Pressed */
	if (events & PCI_EXP_SLTSTA_ABP) {
		ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
			  slot_name(slot));
		pciehp_handle_button_press(slot);
	}

	/* Check Power Fault Detected */
	if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
		ctrl->power_fault_detected = 1;
		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
		pciehp_set_attention_status(slot, 1);
		pciehp_green_led_off(slot);
	}

	/*
	 * Disable requests have higher priority than Presence Detect Changed
	 * or Data Link Layer State Changed events.
	 */
	down_read(&ctrl->reset_lock);
	if (events & DISABLE_SLOT)
		pciehp_handle_disable_request(slot);
	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
		pciehp_handle_presence_or_link_change(slot, events);
	up_read(&ctrl->reset_lock);

	pci_config_pm_runtime_put(pdev);
	wake_up(&ctrl->requester);
	return IRQ_HANDLED;
}
/*
 * Poll-mode kthread: substitutes for the interrupt by periodically
 * running pciehp_isr()/pciehp_ist() until kthread_stop() is called.
 */
static int pciehp_poll(void *data)
{
	struct controller *ctrl = data;

	schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */

	while (!kthread_should_stop()) {
		/* poll for interrupt events or user requests */
		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
		       atomic_read(&ctrl->pending_events))
			pciehp_ist(IRQ_NOTCONNECTED, ctrl);

		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
			pciehp_poll_time = 2; /* clamp to sane value */

		schedule_timeout_idle(pciehp_poll_time * HZ);
	}

	return 0;
}
/*
 * Unmask the hotplug event sources this driver consumes by writing the
 * Slot Control enable bits (without waiting for command completion).
 */
static void pcie_enable_notification(struct controller *ctrl)
{
	u16 cmd, mask;

	/*
	 * TBD: Power fault detected software notification support.
	 *
	 * Power fault detected software notification is not enabled
	 * now, because it caused power fault detected interrupt storm
	 * on some machines. On those machines, power fault detected
	 * bit in the slot status register was set again immediately
	 * when it is cleared in the interrupt service routine, and
	 * next power fault detected interrupt was notified again.
	 */

	/*
	 * Always enable link events: thus link-up and link-down shall
	 * always be treated as hotplug and unplug respectively. Enable
	 * presence detect only if Attention Button is not present.
	 */
	cmd = PCI_EXP_SLTCTL_DLLSCE;
	if (ATTN_BUTTN(ctrl))
		cmd |= PCI_EXP_SLTCTL_ABPE;
	else
		cmd |= PCI_EXP_SLTCTL_PDCE;
	/* In poll mode no interrupts are delivered; leave HPIE/CCIE off */
	if (!pciehp_poll_mode)
		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;

	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
		PCI_EXP_SLTCTL_PFDE |
		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
		PCI_EXP_SLTCTL_DLLSCE);

	pcie_write_cmd_nowait(ctrl, cmd, mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
  611. static void pcie_disable_notification(struct controller *ctrl)
  612. {
  613. u16 mask;
  614. mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
  615. PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
  616. PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
  617. PCI_EXP_SLTCTL_DLLSCE);
  618. pcie_write_cmd(ctrl, 0, mask);
  619. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  620. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
  621. }
  622. void pcie_clear_hotplug_events(struct controller *ctrl)
  623. {
  624. pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
  625. PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
  626. }
/*
 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
 * bus reset of the bridge, but at the same time we want to ensure that it is
 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
 * disable link state notification and presence detection change notification
 * momentarily, if we see that they could interfere. Also, clear any spurious
 * events after.
 */
int pciehp_reset_slot(struct slot *slot, int probe)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 stat_mask = 0, ctrl_mask = 0;
	int rc;

	/* probe phase: just report that reset is supported */
	if (probe)
		return 0;

	/* exclude the IRQ thread while notifications are masked */
	down_write(&ctrl->reset_lock);

	/* PDC is only consumed when there is no attention button */
	if (!ATTN_BUTTN(ctrl)) {
		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
		stat_mask |= PCI_EXP_SLTSTA_PDC;
	}
	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
	stat_mask |= PCI_EXP_SLTSTA_DLLSC;

	/* mask the notifications the reset would otherwise trigger */
	pcie_write_cmd(ctrl, 0, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);

	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);

	/* clear spurious events latched during the reset, then unmask */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);

	up_write(&ctrl->reset_lock);
	return rc;
}
  661. int pcie_init_notification(struct controller *ctrl)
  662. {
  663. if (pciehp_request_irq(ctrl))
  664. return -1;
  665. pcie_enable_notification(ctrl);
  666. ctrl->notification_enabled = 1;
  667. return 0;
  668. }
  669. void pcie_shutdown_notification(struct controller *ctrl)
  670. {
  671. if (ctrl->notification_enabled) {
  672. pcie_disable_notification(ctrl);
  673. pciehp_free_irq(ctrl);
  674. ctrl->notification_enabled = 0;
  675. }
  676. }
  677. static int pcie_init_slot(struct controller *ctrl)
  678. {
  679. struct pci_bus *subordinate = ctrl_dev(ctrl)->subordinate;
  680. struct slot *slot;
  681. slot = kzalloc(sizeof(*slot), GFP_KERNEL);
  682. if (!slot)
  683. return -ENOMEM;
  684. down_read(&pci_bus_sem);
  685. slot->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
  686. up_read(&pci_bus_sem);
  687. slot->ctrl = ctrl;
  688. mutex_init(&slot->lock);
  689. INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
  690. ctrl->slot = slot;
  691. return 0;
  692. }
  693. static void pcie_cleanup_slot(struct controller *ctrl)
  694. {
  695. struct slot *slot = ctrl->slot;
  696. cancel_delayed_work_sync(&slot->work);
  697. kfree(slot);
  698. }
  699. static inline void dbg_ctrl(struct controller *ctrl)
  700. {
  701. struct pci_dev *pdev = ctrl->pcie->port;
  702. u16 reg16;
  703. if (!pciehp_debug)
  704. return;
  705. ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
  706. pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
  707. ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
  708. pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
  709. ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
  710. }
/* '+' if bit y is set in x, '-' otherwise (for capability logging) */
#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')

/*
 * Allocate and initialize a hotplug controller for the given port
 * service device: read and sanitize Slot Capabilities, set up locks
 * and waitqueues, clear stale events, create the slot, and power off
 * an empty-but-powered slot. Returns the controller or NULL on error.
 */
struct controller *pcie_init(struct pcie_device *dev)
{
	struct controller *ctrl;
	u32 slot_cap, link_cap;
	u8 occupied, poweron;
	struct pci_dev *pdev = dev->port;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto abort;
	ctrl->pcie = dev;
	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);

	/* Platform asked for user-controlled indicators: hide AIP/PIP */
	if (pdev->hotplug_user_indicators)
		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);

	/*
	 * We assume no Thunderbolt controllers support Command Complete events,
	 * but some controllers falsely claim they do.
	 */
	if (pdev->is_thunderbolt)
		slot_cap |= PCI_EXP_SLTCAP_NCCS;

	ctrl->slot_cap = slot_cap;
	mutex_init(&ctrl->ctrl_lock);
	init_rwsem(&ctrl->reset_lock);
	init_waitqueue_head(&ctrl->requester);
	init_waitqueue_head(&ctrl->queue);
	dbg_ctrl(ctrl);

	/* Check if Data Link Layer Link Active Reporting is implemented */
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
	if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
		ctrl->link_active_reporting = 1;

	/* Clear all remaining event bits in Slot Status register. */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);

	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");

	if (pcie_init_slot(ctrl))
		goto abort_ctrl;

	/*
	 * If empty slot's power status is on, turn power off. The IRQ isn't
	 * requested yet, so avoid triggering a notification with this command.
	 */
	if (POWER_CTRL(ctrl)) {
		pciehp_get_adapter_status(ctrl->slot, &occupied);
		pciehp_get_power_status(ctrl->slot, &poweron);
		if (!occupied && poweron) {
			pcie_disable_notification(ctrl);
			pciehp_power_off_slot(ctrl->slot);
		}
	}

	return ctrl;

abort_ctrl:
	kfree(ctrl);
abort:
	return NULL;
}
/* Counterpart of pcie_init(): free the slot and the controller. */
void pciehp_release_ctrl(struct controller *ctrl)
{
	pcie_cleanup_slot(ctrl);
	kfree(ctrl);
}
  784. static void quirk_cmd_compl(struct pci_dev *pdev)
  785. {
  786. u32 slot_cap;
  787. if (pci_is_pcie(pdev)) {
  788. pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
  789. if (slot_cap & PCI_EXP_SLTCAP_HPC &&
  790. !(slot_cap & PCI_EXP_SLTCAP_NCCS))
  791. pdev->broken_cmd_compl = 1;
  792. }
  793. }
  794. DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
  795. PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
  796. DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
  797. PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
  798. DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
  799. PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);