pci-driver.c

// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2007 Novell Inc.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mempolicy.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/kexec.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
#include "pci.h"
#include "pcie/portdrv.h"

struct pci_dynid {
	struct list_head node;
	struct pci_device_id id;
};

/**
 * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
 * @drv: target pci driver
 * @vendor: PCI vendor ID
 * @device: PCI device ID
 * @subvendor: PCI subvendor ID
 * @subdevice: PCI subdevice ID
 * @class: PCI class
 * @class_mask: PCI class mask
 * @driver_data: private driver data
 *
 * Adds a new dynamic pci device ID to this driver and causes the
 * driver to probe for all devices again.  @drv must have been
 * registered prior to calling this function.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int pci_add_dynid(struct pci_driver *drv,
		  unsigned int vendor, unsigned int device,
		  unsigned int subvendor, unsigned int subdevice,
		  unsigned int class, unsigned int class_mask,
		  unsigned long driver_data)
{
	struct pci_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.vendor = vendor;
	dynid->id.device = device;
	dynid->id.subvendor = subvendor;
	dynid->id.subdevice = subdevice;
	dynid->id.class = class;
	dynid->id.class_mask = class_mask;
	dynid->id.driver_data = driver_data;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}
EXPORT_SYMBOL_GPL(pci_add_dynid);
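
/*
 * Illustrative (hypothetical) use of pci_add_dynid(): a driver that has
 * already been registered can teach itself one more device ID at run time,
 * e.g. from a module parameter handler.  The driver name and IDs below are
 * made up for the sketch.
 *
 *	static struct pci_driver foo_driver;	// assumed registered earlier
 *
 *	static int foo_add_id(u16 vendor, u16 device)
 *	{
 *		// Wildcard the subsystem IDs, no class match, no private data.
 *		return pci_add_dynid(&foo_driver, vendor, device,
 *				     PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0);
 *	}
 *
 * On success the PCI core re-runs matching via driver_attach(), so an
 * already present device with that ID gets probed immediately.
 */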

static void pci_free_dynids(struct pci_driver *drv)
{
	struct pci_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/**
 * new_id_store - sysfs frontend to pci_add_dynid()
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Allow PCI IDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct pci_driver *pdrv = to_pci_driver(driver);
	const struct pci_device_id *ids = pdrv->id_table;
	__u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	unsigned long driver_data = 0;
	int fields = 0;
	int retval = 0;

	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask, &driver_data);
	if (fields < 2)
		return -EINVAL;

	if (fields != 7) {
		struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);

		if (!pdev)
			return -ENOMEM;

		pdev->vendor = vendor;
		pdev->device = device;
		pdev->subsystem_vendor = subvendor;
		pdev->subsystem_device = subdevice;
		pdev->class = class;

		if (pci_match_id(pdrv->id_table, pdev))
			retval = -EEXIST;

		kfree(pdev);

		if (retval)
			return retval;
	}

	/* Only accept driver_data values that match an existing id_table
	   entry */
	if (ids) {
		retval = -EINVAL;
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (driver_data == ids->driver_data) {
				retval = 0;
				break;
			}
			ids++;
		}
		if (retval)	/* No match */
			return retval;
	}

	retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
			       class, class_mask, driver_data);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
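
/*
 * Input format accepted by the "new_id" attribute above, as parsed by the
 * sscanf() call: up to seven hex fields, of which only the first two are
 * mandatory:
 *
 *	vendor device [subvendor [subdevice [class [class_mask [driver_data]]]]]
 *
 * A hypothetical example (driver name and IDs chosen for illustration):
 *
 *	echo "8086 10d3" > /sys/bus/pci/drivers/foo/new_id
 */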

/**
 * remove_id_store - remove a PCI device ID from this driver
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Removes a dynamic PCI device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct pci_dynid *dynid, *n;
	struct pci_driver *pdrv = to_pci_driver(driver);
	__u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	int fields = 0;
	size_t retval = -ENODEV;

	fields = sscanf(buf, "%x %x %x %x %x %x",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask);
	if (fields < 2)
		return -EINVAL;

	spin_lock(&pdrv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
		struct pci_device_id *id = &dynid->id;

		if ((id->vendor == vendor) &&
		    (id->device == device) &&
		    (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
		    (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
		    !((id->class ^ class) & class_mask)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&pdrv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);
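
/*
 * Input format for the "remove_id" attribute, mirroring "new_id" but without
 * a driver_data field (six hex fields, the first two mandatory):
 *
 *	vendor device [subvendor [subdevice [class [class_mask]]]]
 *
 * Only dynamically added IDs can be removed this way; entries from the
 * driver's static id_table are never touched, since the loop above only
 * walks the dynids list.
 */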

static struct attribute *pci_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(pci_drv);

/**
 * pci_match_id - See if a pci device matches a given pci_id table
 * @ids: array of PCI device id structures to search in
 * @dev: the PCI device structure to match against.
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 *
 * Deprecated, don't use this as it will not catch any dynamic ids
 * that a driver might want to check for.
 */
const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
					 struct pci_dev *dev)
{
	if (ids) {
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (pci_match_one_device(ids, dev))
				return ids;
			ids++;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_match_id);
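
/*
 * Sketch of how a driver-side caller might use pci_match_id() directly
 * (discouraged by the note above, since dynamic IDs are not consulted).
 * The table, IDs and helper below are invented for illustration:
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678), .driver_data = 1 },
 *		{ }	// terminating all-zero entry
 *	};
 *
 *	const struct pci_device_id *ent = pci_match_id(foo_ids, pdev);
 *	if (ent)
 *		foo_configure(pdev, ent->driver_data);
 *
 * Note that the lookup loop stops at the first entry whose vendor, subvendor
 * and class_mask are all zero, which is why the terminator is required.
 */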

static const struct pci_device_id pci_device_id_any = {
	.vendor = PCI_ANY_ID,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
};

/**
 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
 * @drv: the PCI driver to match against
 * @dev: the PCI device structure to match against
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 */
static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						    struct pci_dev *dev)
{
	struct pci_dynid *dynid;
	const struct pci_device_id *found_id = NULL;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (pci_match_one_device(&dynid->id, dev)) {
			found_id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	if (!found_id)
		found_id = pci_match_id(drv->id_table, dev);

	/* driver_override will always match, send a dummy id */
	if (!found_id && dev->driver_override)
		found_id = &pci_device_id_any;

	return found_id;
}
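
/*
 * The driver_override handling above is what makes forced binding from
 * userspace work.  A hedged example (the device address and driver name are
 * placeholders):
 *
 *	echo vfio-pci > /sys/bus/pci/devices/0000:01:00.0/driver_override
 *	echo 0000:01:00.0 > /sys/bus/pci/drivers_probe
 *
 * With driver_override set, pci_match_device() refuses every other driver
 * and hands the named driver the catch-all pci_device_id_any entry, so the
 * match succeeds even without a table entry.
 */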

struct drv_dev_and_id {
	struct pci_driver *drv;
	struct pci_dev *dev;
	const struct pci_device_id *id;
};

static long local_pci_probe(void *_ddi)
{
	struct drv_dev_and_id *ddi = _ddi;
	struct pci_dev *pci_dev = ddi->dev;
	struct pci_driver *pci_drv = ddi->drv;
	struct device *dev = &pci_dev->dev;
	int rc;

	/*
	 * Unbound PCI devices are always put in D0, regardless of
	 * runtime PM status.  During probe, the device is set to
	 * active and the usage count is incremented.  If the driver
	 * supports runtime PM, it should call pm_runtime_put_noidle(),
	 * or any other runtime PM helper function decrementing the usage
	 * count, in its probe routine and pm_runtime_get_noresume() in
	 * its remove routine.
	 */
	pm_runtime_get_sync(dev);
	pci_dev->driver = pci_drv;
	rc = pci_drv->probe(pci_dev, ddi->id);
	if (!rc)
		return rc;

	if (rc < 0) {
		pci_dev->driver = NULL;
		pm_runtime_put_sync(dev);
		return rc;
	}
	/*
	 * Probe function should return < 0 for failure, 0 for success
	 * Treat values > 0 as success, but warn.
	 */
	dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
	return 0;
}
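
/*
 * Minimal sketch of the probe/remove pairing described in the comment above,
 * for a hypothetical driver that wants runtime PM enabled on its device.
 * Function names other than the pm_runtime_* helpers are made up:
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		...
 *		pm_runtime_put_noidle(&pdev->dev);	// drop the probe-time reference
 *		pm_runtime_allow(&pdev->dev);		// optional: permit runtime suspend
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pm_runtime_forbid(&pdev->dev);
 *		pm_runtime_get_noresume(&pdev->dev);	// rebalance before unbind
 *		...
 *	}
 */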

static bool pci_physfn_is_probed(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	return dev->is_virtfn && dev->physfn->is_probed;
#else
	return false;
#endif
}

static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	int error, node, cpu;
	struct drv_dev_and_id ddi = { drv, dev, id };

	/*
	 * Execute driver initialization on node where the device is
	 * attached.  This way the driver likely allocates its local memory
	 * on the right node.
	 */
	node = dev_to_node(&dev->dev);
	dev->is_probed = 1;

	cpu_hotplug_disable();

	/*
	 * Prevent nesting work_on_cpu() for the case where a Virtual Function
	 * device is probed from work_on_cpu() of the Physical device.
	 */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
	    pci_physfn_is_probed(dev))
		cpu = nr_cpu_ids;
	else
		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);

	if (cpu < nr_cpu_ids)
		error = work_on_cpu(cpu, local_pci_probe, &ddi);
	else
		error = local_pci_probe(&ddi);

	dev->is_probed = 0;
	cpu_hotplug_enable();
	return error;
}

/**
 * __pci_device_probe - check if a driver wants to claim a specific PCI device
 * @drv: driver to call to check if it wants the PCI device
 * @pci_dev: PCI device being probed
 *
 * returns 0 on success, else error.
 * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
 */
static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
	const struct pci_device_id *id;
	int error = 0;

	if (!pci_dev->driver && drv->probe) {
		error = -ENODEV;

		id = pci_match_device(drv, pci_dev);
		if (id)
			error = pci_call_probe(drv, pci_dev, id);
	}
	return error;
}

int __weak pcibios_alloc_irq(struct pci_dev *dev)
{
	return 0;
}

void __weak pcibios_free_irq(struct pci_dev *dev)
{
}

#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return true;
}
#endif

static int pci_device_probe(struct device *dev)
{
	int error;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = to_pci_driver(dev->driver);

	pci_assign_irq(pci_dev);

	error = pcibios_alloc_irq(pci_dev);
	if (error < 0)
		return error;

	pci_dev_get(pci_dev);
	if (pci_device_can_probe(pci_dev)) {
		error = __pci_device_probe(drv, pci_dev);
		if (error) {
			pcibios_free_irq(pci_dev);
			pci_dev_put(pci_dev);
		}
	}

	return error;
}

static int pci_device_remove(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv) {
		if (drv->remove) {
			pm_runtime_get_sync(dev);
			drv->remove(pci_dev);
			pm_runtime_put_noidle(dev);
		}
		pcibios_free_irq(pci_dev);
		pci_dev->driver = NULL;
	}

	/* Undo the runtime PM settings in local_pci_probe() */
	pm_runtime_put_sync(dev);

	/*
	 * If the device is still on, set the power state as "unknown",
	 * since it might change by the next time we load the driver.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;

	/*
	 * We would love to complain here if pci_dev->is_enabled is set, that
	 * the driver should have called pci_disable_device(), but the
	 * unfortunate fact is there are too many odd BIOS and bridge setups
	 * that don't like drivers doing that all of the time.
	 * Oh well, we can dream of sane hardware when we sleep, no matter how
	 * horrible the crap we have to deal with is when we are awake...
	 */
	pci_dev_put(pci_dev);
	return 0;
}

static void pci_device_shutdown(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pm_runtime_resume(dev);

	if (drv && drv->shutdown)
		drv->shutdown(pci_dev);

	/*
	 * If this is a kexec reboot, turn off Bus Master bit on the
	 * device to tell it to not continue to do DMA.  Don't touch
	 * devices in D3cold or unknown states.
	 * If it is not a kexec reboot, firmware will hit the PCI
	 * devices with big hammer and stop their DMA any way.
	 */
	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
		pci_clear_master(pci_dev);
}

#ifdef CONFIG_PM

/* Auxiliary functions used for system resume and run-time resume. */

/**
 * pci_restore_standard_config - restore standard config registers of PCI device
 * @pci_dev: PCI device to handle
 */
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
	pci_update_current_state(pci_dev, PCI_UNKNOWN);

	if (pci_dev->current_state != PCI_D0) {
		int error = pci_set_power_state(pci_dev, PCI_D0);

		if (error)
			return error;
	}

	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
	return 0;
}

#endif

#ifdef CONFIG_PM_SLEEP

static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
	pci_power_up(pci_dev);
	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
	pci_fixup_device(pci_fixup_resume_early, pci_dev);
}

/*
 * Default "suspend" method for devices that have no driver provided suspend,
 * or not even a driver at all (second part).
 */
static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
{
	/*
	 * mark its power state as "unknown", since we don't know if
	 * e.g. the BIOS will change its device state when we suspend.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;
}

/*
 * Default "resume" method for devices that have no driver provided resume,
 * or not even a driver at all (second part).
 */
static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
	int retval;

	/* if the device was enabled before suspend, reenable */
	retval = pci_reenable_device(pci_dev);

	/*
	 * if the device was busmaster before the suspend, make it busmaster
	 * again
	 */
	if (pci_dev->is_busmaster)
		pci_set_master(pci_dev);

	return retval;
}

static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend(pci_dev, state);
		suspend_report_result(drv->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: Device state not saved by %pF\n",
				drv->suspend);
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return 0;
}

static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend_late) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend_late(pci_dev, state);
		suspend_report_result(drv->suspend_late, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: Device state not saved by %pF\n",
				drv->suspend_late);
			goto Fixup;
		}
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	return 0;
}

static int pci_legacy_resume_early(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	return drv && drv->resume_early ?
			drv->resume_early(pci_dev) : 0;
}

static int pci_legacy_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pci_fixup_device(pci_fixup_resume, pci_dev);

	return drv && drv->resume ?
			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
}

/* Auxiliary functions used by the new power management framework */

static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
	pci_fixup_device(pci_fixup_resume, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
}

static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
	/* Disable non-bridge devices without PM support */
	if (!pci_has_subordinate(pci_dev))
		pci_disable_enabled_device(pci_dev);
}

static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
	struct pci_driver *drv = pci_dev->driver;
	bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
			   || drv->resume_early);

	/*
	 * Legacy PM support is used by default, so warn if the new framework is
	 * supported as well.  Drivers are supposed to support either the
	 * former, or the latter, but not both at the same time.
	 */
	WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
	     drv->name, pci_dev->vendor, pci_dev->device);

	return ret;
}
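
/*
 * For reference, the "new framework" mentioned above means a driver that
 * fills in a struct dev_pm_ops instead of the legacy pci_driver
 * suspend/resume hooks.  A minimal, hypothetical sketch (callback bodies
 * and the id table are assumed to exist elsewhere):
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *		.driver		= {
 *			.pm	= &foo_pm_ops,
 *		},
 *	};
 *
 * Mixing both styles in one driver triggers the WARN() above.
 */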

/* New power management framework */

static int pci_pm_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->prepare) {
		int error = drv->pm->prepare(dev);

		if (error < 0)
			return error;

		if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
			return 0;
	}
	return pci_dev_keep_suspended(to_pci_dev(dev));
}

static void pci_pm_complete(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_dev_complete_resume(pci_dev);
	pm_generic_complete(dev);

	/* Resume device if platform firmware has put it in reset-power-on */
	if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
		pci_power_t pre_sleep_state = pci_dev->current_state;

		pci_update_current_state(pci_dev, pci_dev->current_state);
		if (pci_dev->current_state < pre_sleep_state)
			pm_request_resume(dev);
	}
}

#else /* !CONFIG_PM_SLEEP */

#define pci_pm_prepare	NULL
#define pci_pm_complete	NULL

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND
static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev)
{
	/*
	 * Some BIOSes forget to clear Root PME Status bits after system
	 * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
	 * Clear those bits now just in case (shouldn't hurt).
	 */
	if (pci_is_pcie(pci_dev) &&
	    (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT ||
	     pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC))
		pcie_clear_root_pme_status(pci_dev);
}

static int pci_pm_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * PCI devices suspended at run time may need to be resumed at this
	 * point, because in general it may be necessary to reconfigure them for
	 * system suspend.  Namely, if the device is expected to wake up the
	 * system from the sleep state, it may have to be reconfigured for this
	 * purpose, or if the device is not expected to wake up the system from
	 * the sleep state, it should be prevented from signaling wakeup events
	 * going forward.
	 *
	 * Also if the driver of the device does not indicate that its system
	 * suspend callbacks can cope with runtime-suspended devices, it is
	 * better to resume the device from runtime suspend here.
	 */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    !pci_dev_keep_suspended(pci_dev))
		pm_runtime_resume(dev);

	pci_dev->state_saved = false;
	if (pm->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend(dev);
		suspend_report_result(pm->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: State of device not saved by %pF\n",
				pm->suspend);
		}
	}

	return 0;
}

static int pci_pm_suspend_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_suspend_late(dev);
}

static int pci_pm_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		dev->power.may_skip_resume = true;
		return 0;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_save_state(pci_dev);
		goto Fixup;
	}

	if (pm->suspend_noirq) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend_noirq(dev);
		suspend_report_result(pm->suspend_noirq, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: State of device not saved by %pF\n",
				pm->suspend_noirq);
			goto Fixup;
		}
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		if (pci_power_manageable(pci_dev))
			pci_prepare_to_sleep(pci_dev);
	}

	dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
		pci_power_name(pci_dev->current_state));

	pci_pm_set_unknown_state(pci_dev);

	/*
	 * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
	 * PCI COMMAND register isn't 0, the BIOS assumes that the controller
	 * hasn't been quiesced and tries to turn it off.  If the controller
	 * is already in D3, this can hang or cause memory corruption.
	 *
	 * Since the value of the COMMAND register doesn't matter once the
	 * device has been suspended, we can safely set it to 0 here.
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	/*
	 * If the target system sleep state is suspend-to-idle, it is sufficient
	 * to check whether or not the device's wakeup settings are good for
	 * runtime PM.  Otherwise, the pm_resume_via_firmware() check will cause
	 * pci_pm_complete() to take care of fixing up the device's state
	 * anyway, if need be.
	 */
	dev->power.may_skip_resume = device_may_wakeup(dev) ||
					!device_can_wakeup(dev);

	return 0;
}

static int pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (dev_pm_may_skip_resume(dev))
		return 0;

	/*
	 * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
	 * during system suspend, so update their runtime PM status to "active"
	 * as they are going to be put into D0 shortly.
	 */
	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	pci_pm_default_resume_early(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	pcie_pme_root_status_cleanup(pci_dev);

	if (drv && drv->pm && drv->pm->resume_noirq)
		error = drv->pm->resume_noirq(dev);

	return error;
}

static int pci_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the suspend error path in which resume is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->resume)
			error = pm->resume(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_SUSPEND */

#define pci_pm_suspend		NULL
#define pci_pm_suspend_late	NULL
#define pci_pm_suspend_noirq	NULL
#define pci_pm_resume		NULL
#define pci_pm_resume_noirq	NULL

#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

/*
 * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
 * a hibernate transition
 */
struct dev_pm_ops __weak pcibios_pm_ops;

static int pci_pm_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_FREEZE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * This used to be done in pci_pm_prepare() for all devices and some
	 * drivers may depend on it, so do it here.  Ideally, runtime-suspended
	 * devices should not be touched during freeze/thaw transitions,
	 * however.
	 */
	if (!dev_pm_smart_suspend_and_suspended(dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	}

	if (pm->freeze) {
		int error;

		error = pm->freeze(dev);
		suspend_report_result(pm->freeze, error);
		if (error)
			return error;
	}

	return 0;
}

static int pci_pm_freeze_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	return pm_generic_freeze_late(dev);
}

static int pci_pm_freeze_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_FREEZE);

	if (drv && drv->pm && drv->pm->freeze_noirq) {
		int error;

		error = drv->pm->freeze_noirq(dev);
		suspend_report_result(drv->pm->freeze_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	if (pcibios_pm_ops.freeze_noirq)
		return pcibios_pm_ops.freeze_noirq(dev);

	return 0;
}

static int pci_pm_thaw_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	/*
	 * If the device is in runtime suspend, the code below may not work
	 * correctly with it, so skip that code and make the PM core skip all of
	 * the subsequent "thaw" callbacks for the device.
	 */
	if (dev_pm_smart_suspend_and_suspended(dev)) {
		dev_pm_skip_next_resume_phases(dev);
		return 0;
	}

	if (pcibios_pm_ops.thaw_noirq) {
		error = pcibios_pm_ops.thaw_noirq(dev);
		if (error)
			return error;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	/*
	 * pci_restore_state() requires the device to be in D0 (because of MSI
	 * restoration among other things), so force it into D0 in case the
	 * driver's "freeze" callbacks put it into a low-power state directly.
	 */
	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	if (drv && drv->pm && drv->pm->thaw_noirq)
		error = drv->pm->thaw_noirq(dev);

	return error;
}

static int pci_pm_thaw(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	if (pm) {
		if (pm->thaw)
			error = pm->thaw(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	pci_dev->state_saved = false;

	return error;
}

static int pci_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/* The reason to do that is the same as in pci_pm_suspend(). */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    !pci_dev_keep_suspended(pci_dev))
		pm_runtime_resume(dev);

	pci_dev->state_saved = false;
	if (pm->poweroff) {
		int error;

		error = pm->poweroff(dev);
		suspend_report_result(pm->poweroff, error);
		if (error)
			return error;
	}

	return 0;
}

static int pci_pm_poweroff_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_poweroff_late(dev);
}

static int pci_pm_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pci_has_legacy_pm_support(to_pci_dev(dev)))
		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);

	if (!drv || !drv->pm) {
		pci_fixup_device(pci_fixup_suspend_late, pci_dev);
		return 0;
	}

	if (drv->pm->poweroff_noirq) {
		int error;

		error = drv->pm->poweroff_noirq(dev);
		suspend_report_result(drv->pm->poweroff_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
		pci_prepare_to_sleep(pci_dev);

	/*
	 * The reason for doing this here is the same as for the analogous code
	 * in pci_pm_suspend_noirq().
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	if (pcibios_pm_ops.poweroff_noirq)
		return pcibios_pm_ops.poweroff_noirq(dev);

	return 0;
}

static int pci_pm_restore_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	/* This is analogous to the pci_pm_resume_noirq() case. */
	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	if (pcibios_pm_ops.restore_noirq) {
		error = pcibios_pm_ops.restore_noirq(dev);
		if (error)
			return error;
	}

	pci_pm_default_resume_early(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	if (drv && drv->pm && drv->pm->restore_noirq)
		error = drv->pm->restore_noirq(dev);

	return error;
}

static int pci_pm_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the hibernation error path in which restore is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->restore)
			error = pm->restore(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_HIBERNATE_CALLBACKS */

#define pci_pm_freeze		NULL
#define pci_pm_freeze_late	NULL
#define pci_pm_freeze_noirq	NULL
#define pci_pm_thaw		NULL
#define pci_pm_thaw_noirq	NULL
#define pci_pm_poweroff		NULL
#define pci_pm_poweroff_late	NULL
#define pci_pm_poweroff_noirq	NULL
#define pci_pm_restore		NULL
#define pci_pm_restore_noirq	NULL

#endif /* !CONFIG_HIBERNATE_CALLBACKS */

#ifdef CONFIG_PM

static int pci_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev = pci_dev->current_state;
	int error;

	/*
	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
	 * but it may go to D3cold when the bridge above it runtime suspends.
	 * Save its config space in case that happens.
	 */
	if (!pci_dev->driver) {
		pci_save_state(pci_dev);
		return 0;
	}

	if (!pm || !pm->runtime_suspend)
		return -ENOSYS;

	pci_dev->state_saved = false;
	error = pm->runtime_suspend(dev);
	if (error) {
		/*
		 * -EBUSY and -EAGAIN are used to request that the runtime PM
		 * core schedule a new suspend, so log the event only at debug
		 * log level.
		 */
		if (error == -EBUSY || error == -EAGAIN)
			dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
				pm->runtime_suspend, error);
		else
			dev_err(dev, "can't suspend (%pf returned %d)\n",
				pm->runtime_suspend, error);

		return error;
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
	    && pci_dev->current_state != PCI_UNKNOWN) {
		WARN_ONCE(pci_dev->current_state != prev,
			"PCI PM: State of device not saved by %pF\n",
			pm->runtime_suspend);
		return 0;
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		pci_finish_runtime_suspend(pci_dev);
	}

	return 0;
}

static int pci_pm_runtime_resume(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * Restoring config space is necessary even if the device is not bound
	 * to a driver because although we left it in D0, it may have gone to
	 * D3cold when the bridge above it runtime suspended.
	 */
	pci_restore_standard_config(pci_dev);

	if (!pci_dev->driver)
		return 0;

	if (!pm || !pm->runtime_resume)
		return -ENOSYS;

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
	pci_fixup_device(pci_fixup_resume, pci_dev);

	rc = pm->runtime_resume(dev);

	pci_dev->runtime_d3cold = false;

	return rc;
}

static int pci_pm_runtime_idle(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret = 0;

	/*
	 * If pci_dev->driver is not set (unbound), the device should
	 * always remain in D0 regardless of the runtime PM status
	 */
	if (!pci_dev->driver)
		return 0;

	if (!pm)
		return -ENOSYS;

	if (pm->runtime_idle)
		ret = pm->runtime_idle(dev);

	return ret;
}

static const struct dev_pm_ops pci_dev_pm_ops = {
	.prepare = pci_pm_prepare,
	.complete = pci_pm_complete,
	.suspend = pci_pm_suspend,
	.suspend_late = pci_pm_suspend_late,
	.resume = pci_pm_resume,
	.freeze = pci_pm_freeze,
	.freeze_late = pci_pm_freeze_late,
	.thaw = pci_pm_thaw,
	.poweroff = pci_pm_poweroff,
	.poweroff_late = pci_pm_poweroff_late,
	.restore = pci_pm_restore,
	.suspend_noirq = pci_pm_suspend_noirq,
	.resume_noirq = pci_pm_resume_noirq,
	.freeze_noirq = pci_pm_freeze_noirq,
	.thaw_noirq = pci_pm_thaw_noirq,
	.poweroff_noirq = pci_pm_poweroff_noirq,
	.restore_noirq = pci_pm_restore_noirq,
	.runtime_suspend = pci_pm_runtime_suspend,
	.runtime_resume = pci_pm_runtime_resume,
	.runtime_idle = pci_pm_runtime_idle,
};

#define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)

#else /* !CONFIG_PM */

#define pci_pm_runtime_suspend	NULL
#define pci_pm_runtime_resume	NULL
#define pci_pm_runtime_idle	NULL

#define PCI_PM_OPS_PTR	NULL

#endif /* !CONFIG_PM */

/**
 * __pci_register_driver - register a new pci driver
 * @drv: the driver structure to register
 * @owner: owner module of drv
 * @mod_name: module name string
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns a negative value on error, otherwise 0.
 * If no error occurred, the driver remains registered even if
 * no device was claimed during registration.
 */
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
			  const char *mod_name)
{
	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &pci_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	drv->driver.groups = drv->groups;

	spin_lock_init(&drv->dynids.lock);
	INIT_LIST_HEAD(&drv->dynids.list);

	/* register with core */
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(__pci_register_driver);
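
/*
 * Drivers normally reach __pci_register_driver() through the wrappers in
 * <linux/pci.h>.  A typical, hypothetical module boilerplate looks like:
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_driver);
 *
 * module_pci_driver() expands to module_init()/module_exit() functions that
 * call pci_register_driver() and pci_unregister_driver(), and
 * pci_register_driver() in turn supplies THIS_MODULE and KBUILD_MODNAME to
 * __pci_register_driver().
 */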

/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */
void pci_unregister_driver(struct pci_driver *drv)
{
	driver_unregister(&drv->driver);
	pci_free_dynids(drv);
}
EXPORT_SYMBOL(pci_unregister_driver);

static struct pci_driver pci_compat_driver = {
	.name = "compat"
};

/**
 * pci_dev_driver - get the pci_driver of a device
 * @dev: the device to query
 *
 * Returns the appropriate pci_driver structure or %NULL if there is no
 * registered driver for the device.
 */
struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
	if (dev->driver)
		return dev->driver;
	else {
		int i;

		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			if (dev->resource[i].flags & IORESOURCE_BUSY)
				return &pci_compat_driver;
	}
	return NULL;
}
EXPORT_SYMBOL(pci_dev_driver);

/**
 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
 * @dev: the PCI device structure to match against
 * @drv: the device driver to search for matching PCI device id structures
 *
 * Used by the PCI core to check whether a PCI device present in the
 * system is in a driver's list of supported devices.  Returns 1 if a
 * matching pci_device_id structure is found, 0 otherwise.
 */
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *pci_drv;
	const struct pci_device_id *found_id;

	if (!pci_dev->match_driver)
		return 0;

	pci_drv = to_pci_driver(drv);
	found_id = pci_match_device(pci_drv, pci_dev);
	if (found_id)
		return 1;

	return 0;
}

/**
 * pci_dev_get - increments the reference count of the pci device structure
 * @dev: the device being referenced
 *
 * Each live reference to a device should be refcounted.
 *
 * Drivers for PCI devices should normally record such references in
 * their probe() methods, when they bind to a device, and release
 * them by calling pci_dev_put(), in their disconnect() methods.
 *
 * A pointer to the device with the incremented reference counter is returned.
 */
struct pci_dev *pci_dev_get(struct pci_dev *dev)
{
	if (dev)
		get_device(&dev->dev);
	return dev;
}
EXPORT_SYMBOL(pci_dev_get);

/**
 * pci_dev_put - release a use of the pci device structure
 * @dev: device that's been disconnected
 *
 * Must be called when a user of a device is finished with it.  When the last
 * user of the device calls this function, the memory of the device is freed.
 */
void pci_dev_put(struct pci_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(pci_dev_put);

static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct pci_dev *pdev;

	if (!dev)
		return -ENODEV;

	pdev = to_pci_dev(dev);

	if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
			   pdev->subsystem_device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
			   pdev->vendor, pdev->device,
			   pdev->subsystem_vendor, pdev->subsystem_device,
			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
			   (u8)(pdev->class)))
		return -ENOMEM;

	return 0;
}
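
/*
 * Example of the uevent environment produced above for a hypothetical device
 * at 0000:01:00.0 with vendor 0x1234, device 0x5678, no subsystem IDs and
 * class 0x020000 (Ethernet controller):
 *
 *	PCI_CLASS=20000
 *	PCI_ID=1234:5678
 *	PCI_SUBSYS_ID=0000:0000
 *	PCI_SLOT_NAME=0000:01:00.0
 *	MODALIAS=pci:v00001234d00005678sv00000000sd00000000bc02sc00i00
 *
 * The MODALIAS string is what udev/modprobe match against the aliases
 * generated from driver id_tables by MODULE_DEVICE_TABLE(pci, ...).
 */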

#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
/**
 * pci_uevent_ers - emit a uevent during recovery path of PCI device
 * @pdev: PCI device undergoing error recovery
 * @err_type: type of error event
 */
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
{
	int idx = 0;
	char *envp[3];

	switch (err_type) {
	case PCI_ERS_RESULT_NONE:
	case PCI_ERS_RESULT_CAN_RECOVER:
		envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	case PCI_ERS_RESULT_RECOVERED:
		envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=1";
		break;
	case PCI_ERS_RESULT_DISCONNECT:
		envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	default:
		break;
	}

	if (idx > 0) {
		envp[idx++] = NULL;
		kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
	}
}
#endif

static int pci_bus_num_vf(struct device *dev)
{
	return pci_num_vf(to_pci_dev(dev));
}

/**
 * pci_dma_configure - Setup DMA configuration
 * @dev: ptr to dev structure
 *
 * Function to update a PCI device's DMA configuration using the same
 * info from the OF node or ACPI node of the host bridge's parent (if any).
 */
static int pci_dma_configure(struct device *dev)
{
	struct device *bridge;
	int ret = 0;

	bridge = pci_get_host_bridge_device(to_pci_dev(dev));

	if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
	    bridge->parent->of_node) {
		ret = of_dma_configure(dev, bridge->parent->of_node, true);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
		enum dev_dma_attr attr = acpi_get_dma_attr(adev);

		if (attr != DEV_DMA_NOT_SUPPORTED)
			ret = acpi_dma_configure(dev, attr);
	}

	pci_put_host_bridge_device(bridge);

	return ret;
}

struct bus_type pci_bus_type = {
	.name		= "pci",
	.match		= pci_bus_match,
	.uevent		= pci_uevent,
	.probe		= pci_device_probe,
	.remove		= pci_device_remove,
	.shutdown	= pci_device_shutdown,
	.dev_groups	= pci_dev_groups,
	.bus_groups	= pci_bus_groups,
	.drv_groups	= pci_drv_groups,
	.pm		= PCI_PM_OPS_PTR,
	.num_vf		= pci_bus_num_vf,
	.dma_configure	= pci_dma_configure,
};
EXPORT_SYMBOL(pci_bus_type);

#ifdef CONFIG_PCIEPORTBUS
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pcie_device *pciedev;
	struct pcie_port_service_driver *driver;

	if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
		return 0;

	pciedev = to_pcie_device(dev);
	driver = to_service_driver(drv);

	if (driver->service != pciedev->service)
		return 0;

	if (driver->port_type != PCIE_ANY_PORT &&
	    driver->port_type != pci_pcie_type(pciedev->port))
		return 0;

	return 1;
}

struct bus_type pcie_port_bus_type = {
	.name		= "pci_express",
	.match		= pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
#endif

static int __init pci_driver_init(void)
{
	int ret;

	ret = bus_register(&pci_bus_type);
	if (ret)
		return ret;

#ifdef CONFIG_PCIEPORTBUS
	ret = bus_register(&pcie_port_bus_type);
	if (ret)
		return ret;
#endif

	return 0;
}
postcore_initcall(pci_driver_init);