pci-driver.c

// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2007 Novell Inc.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mempolicy.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/kexec.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
#include "pci.h"
#include "pcie/portdrv.h"

struct pci_dynid {
	struct list_head node;
	struct pci_device_id id;
};

/**
 * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
 * @drv: target pci driver
 * @vendor: PCI vendor ID
 * @device: PCI device ID
 * @subvendor: PCI subvendor ID
 * @subdevice: PCI subdevice ID
 * @class: PCI class
 * @class_mask: PCI class mask
 * @driver_data: private driver data
 *
 * Adds a new dynamic pci device ID to this driver and causes the
 * driver to probe for all devices again.  @drv must have been
 * registered prior to calling this function.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int pci_add_dynid(struct pci_driver *drv,
		  unsigned int vendor, unsigned int device,
		  unsigned int subvendor, unsigned int subdevice,
		  unsigned int class, unsigned int class_mask,
		  unsigned long driver_data)
{
	struct pci_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.vendor = vendor;
	dynid->id.device = device;
	dynid->id.subvendor = subvendor;
	dynid->id.subdevice = subdevice;
	dynid->id.class = class;
	dynid->id.class_mask = class_mask;
	dynid->id.driver_data = driver_data;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}
EXPORT_SYMBOL_GPL(pci_add_dynid);

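/*
 * Illustrative sketch (not part of the original file): a driver that is
 * already registered could add support for one more device ID at run time
 * roughly like this.  "foo_driver" and the 0x1234/0x5678 IDs are made-up
 * placeholders.
 *
 *	int err = pci_add_dynid(&foo_driver, 0x1234, 0x5678,
 *				PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0);
 *	if (err)
 *		pr_warn("foo: failed to add dynamic ID: %d\n", err);
 *
 * The new entry is appended to foo_driver's dynids list and driver_attach()
 * re-runs matching against currently unbound PCI devices.
 */
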
static void pci_free_dynids(struct pci_driver *drv)
{
	struct pci_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/**
 * new_id_store - sysfs frontend to pci_add_dynid()
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Allow PCI IDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct pci_driver *pdrv = to_pci_driver(driver);
	const struct pci_device_id *ids = pdrv->id_table;
	__u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	unsigned long driver_data = 0;
	int fields = 0;
	int retval = 0;

	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask, &driver_data);
	if (fields < 2)
		return -EINVAL;

	if (fields != 7) {
		struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);

		if (!pdev)
			return -ENOMEM;

		pdev->vendor = vendor;
		pdev->device = device;
		pdev->subsystem_vendor = subvendor;
		pdev->subsystem_device = subdevice;
		pdev->class = class;

		if (pci_match_id(pdrv->id_table, pdev))
			retval = -EEXIST;

		kfree(pdev);

		if (retval)
			return retval;
	}

	/* Only accept driver_data values that match an existing id_table
	   entry */
	if (ids) {
		retval = -EINVAL;
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (driver_data == ids->driver_data) {
				retval = 0;
				break;
			}
			ids++;
		}
		if (retval)	/* No match */
			return retval;
	}

	retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
			       class, class_mask, driver_data);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);

/**
 * remove_id_store - remove a PCI device ID from this driver
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Removes a dynamic PCI device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct pci_dynid *dynid, *n;
	struct pci_driver *pdrv = to_pci_driver(driver);
	__u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	int fields = 0;
	size_t retval = -ENODEV;

	fields = sscanf(buf, "%x %x %x %x %x %x",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask);
	if (fields < 2)
		return -EINVAL;

	spin_lock(&pdrv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
		struct pci_device_id *id = &dynid->id;

		if ((id->vendor == vendor) &&
		    (id->device == device) &&
		    (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
		    (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
		    !((id->class ^ class) & class_mask)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&pdrv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

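/*
 * Illustrative usage (not part of the original file): once a driver is
 * registered, the new_id and remove_id attributes above appear under
 * /sys/bus/pci/drivers/<driver>/.  For a hypothetical driver "foo", user
 * space could add and later drop an extra vendor/device pair with:
 *
 *	echo "1234 5678" > /sys/bus/pci/drivers/foo/new_id
 *	echo "1234 5678" > /sys/bus/pci/drivers/foo/remove_id
 *
 * new_id_store() accepts up to seven hex fields (vendor, device, subvendor,
 * subdevice, class, class_mask, driver_data); only the first two are
 * required.
 */
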
static struct attribute *pci_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(pci_drv);

/**
 * pci_match_id - See if a pci device matches a given pci_id table
 * @ids: array of PCI device id structures to search in
 * @dev: the PCI device structure to match against.
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 *
 * Deprecated, don't use this as it will not catch any dynamic ids
 * that a driver might want to check for.
 */
const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
					 struct pci_dev *dev)
{
	if (ids) {
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (pci_match_one_device(ids, dev))
				return ids;
			ids++;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_match_id);

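/*
 * Illustrative sketch (not part of the original file): the id_table walked
 * by pci_match_id() is normally built with the PCI_DEVICE() family of
 * helpers and terminated with an all-zero sentinel entry, e.g. for a
 * hypothetical "foo" driver:
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, foo_ids);
 *
 * The empty sentinel is what ends the while loop above, which stops when
 * vendor, subvendor and class_mask are all zero.
 */
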
static const struct pci_device_id pci_device_id_any = {
	.vendor = PCI_ANY_ID,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
};

/**
 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
 * @drv: the PCI driver to match against
 * @dev: the PCI device structure to match against
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 */
static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						    struct pci_dev *dev)
{
	struct pci_dynid *dynid;
	const struct pci_device_id *found_id = NULL;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (pci_match_one_device(&dynid->id, dev)) {
			found_id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	if (!found_id)
		found_id = pci_match_id(drv->id_table, dev);

	/* driver_override will always match, send a dummy id */
	if (!found_id && dev->driver_override)
		found_id = &pci_device_id_any;

	return found_id;
}

struct drv_dev_and_id {
	struct pci_driver *drv;
	struct pci_dev *dev;
	const struct pci_device_id *id;
};

static long local_pci_probe(void *_ddi)
{
	struct drv_dev_and_id *ddi = _ddi;
	struct pci_dev *pci_dev = ddi->dev;
	struct pci_driver *pci_drv = ddi->drv;
	struct device *dev = &pci_dev->dev;
	int rc;

	/*
	 * Unbound PCI devices are always put in D0, regardless of
	 * runtime PM status.  During probe, the device is set to
	 * active and the usage count is incremented.  If the driver
	 * supports runtime PM, it should call pm_runtime_put_noidle(),
	 * or any other runtime PM helper function decrementing the usage
	 * count, in its probe routine and pm_runtime_get_noresume() in
	 * its remove routine.
	 */
	pm_runtime_get_sync(dev);
	pci_dev->driver = pci_drv;
	rc = pci_drv->probe(pci_dev, ddi->id);
	if (!rc)
		return rc;
	if (rc < 0) {
		pci_dev->driver = NULL;
		pm_runtime_put_sync(dev);
		return rc;
	}
	/*
	 * Probe function should return < 0 for failure, 0 for success
	 * Treat values > 0 as success, but warn.
	 */
	dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
	return 0;
}

static bool pci_physfn_is_probed(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	return dev->is_virtfn && dev->physfn->is_probed;
#else
	return false;
#endif
}

static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	int error, node, cpu;
	struct drv_dev_and_id ddi = { drv, dev, id };

	/*
	 * Execute driver initialization on node where the device is
	 * attached.  This way the driver likely allocates its local memory
	 * on the right node.
	 */
	node = dev_to_node(&dev->dev);
	dev->is_probed = 1;

	cpu_hotplug_disable();

	/*
	 * Prevent nesting work_on_cpu() for the case where a Virtual Function
	 * device is probed from work_on_cpu() of the Physical device.
	 */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
	    pci_physfn_is_probed(dev))
		cpu = nr_cpu_ids;
	else
		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);

	if (cpu < nr_cpu_ids)
		error = work_on_cpu(cpu, local_pci_probe, &ddi);
	else
		error = local_pci_probe(&ddi);

	dev->is_probed = 0;
	cpu_hotplug_enable();
	return error;
}

/**
 * __pci_device_probe - check if a driver wants to claim a specific PCI device
 * @drv: driver to call to check if it wants the PCI device
 * @pci_dev: PCI device being probed
 *
 * returns 0 on success, else error.
 * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
 */
static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
	const struct pci_device_id *id;
	int error = 0;

	if (!pci_dev->driver && drv->probe) {
		error = -ENODEV;

		id = pci_match_device(drv, pci_dev);
		if (id)
			error = pci_call_probe(drv, pci_dev, id);
	}
	return error;
}

int __weak pcibios_alloc_irq(struct pci_dev *dev)
{
	return 0;
}

void __weak pcibios_free_irq(struct pci_dev *dev)
{
}

#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return true;
}
#endif

static int pci_device_probe(struct device *dev)
{
	int error;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = to_pci_driver(dev->driver);

	pci_assign_irq(pci_dev);

	error = pcibios_alloc_irq(pci_dev);
	if (error < 0)
		return error;

	pci_dev_get(pci_dev);
	if (pci_device_can_probe(pci_dev)) {
		error = __pci_device_probe(drv, pci_dev);
		if (error) {
			pcibios_free_irq(pci_dev);
			pci_dev_put(pci_dev);
		}
	}

	return error;
}

static int pci_device_remove(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv) {
		if (drv->remove) {
			pm_runtime_get_sync(dev);
			drv->remove(pci_dev);
			pm_runtime_put_noidle(dev);
		}
		pcibios_free_irq(pci_dev);
		pci_dev->driver = NULL;
		pci_iov_remove(pci_dev);
	}

	/* Undo the runtime PM settings in local_pci_probe() */
	pm_runtime_put_sync(dev);

	/*
	 * If the device is still on, set the power state as "unknown",
	 * since it might change by the next time we load the driver.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;

	/*
	 * We would love to complain here if pci_dev->is_enabled is set, that
	 * the driver should have called pci_disable_device(), but the
	 * unfortunate fact is there are too many odd BIOS and bridge setups
	 * that don't like drivers doing that all of the time.
	 * Oh well, we can dream of sane hardware when we sleep, no matter how
	 * horrible the crap we have to deal with is when we are awake...
	 */
	pci_dev_put(pci_dev);
	return 0;
}

static void pci_device_shutdown(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pm_runtime_resume(dev);

	if (drv && drv->shutdown)
		drv->shutdown(pci_dev);

	/*
	 * If this is a kexec reboot, turn off Bus Master bit on the
	 * device to tell it to not continue to do DMA.  Don't touch
	 * devices in D3cold or unknown states.
	 * If it is not a kexec reboot, firmware will hit the PCI
	 * devices with big hammer and stop their DMA any way.
	 */
	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
		pci_clear_master(pci_dev);
}

#ifdef CONFIG_PM
/* Auxiliary functions used for system resume and run-time resume. */

/**
 * pci_restore_standard_config - restore standard config registers of PCI device
 * @pci_dev: PCI device to handle
 */
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
	pci_update_current_state(pci_dev, PCI_UNKNOWN);

	if (pci_dev->current_state != PCI_D0) {
		int error = pci_set_power_state(pci_dev, PCI_D0);

		if (error)
			return error;
	}

	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
	pci_power_up(pci_dev);
	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
	pci_fixup_device(pci_fixup_resume_early, pci_dev);
}

/*
 * Default "suspend" method for devices that have no driver provided suspend,
 * or not even a driver at all (second part).
 */
static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
{
	/*
	 * mark its power state as "unknown", since we don't know if
	 * e.g. the BIOS will change its device state when we suspend.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;
}

/*
 * Default "resume" method for devices that have no driver provided resume,
 * or not even a driver at all (second part).
 */
static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
	int retval;

	/* if the device was enabled before suspend, reenable */
	retval = pci_reenable_device(pci_dev);
	/*
	 * if the device was busmaster before the suspend, make it busmaster
	 * again
	 */
	if (pci_dev->is_busmaster)
		pci_set_master(pci_dev);

	return retval;
}

static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend(pci_dev, state);
		suspend_report_result(drv->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				  "PCI PM: Device state not saved by %pF\n",
				  drv->suspend);
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return 0;
}

static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend_late) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend_late(pci_dev, state);
		suspend_report_result(drv->suspend_late, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				  "PCI PM: Device state not saved by %pF\n",
				  drv->suspend_late);
			goto Fixup;
		}
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	return 0;
}

static int pci_legacy_resume_early(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	return drv && drv->resume_early ?
			drv->resume_early(pci_dev) : 0;
}

static int pci_legacy_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pci_fixup_device(pci_fixup_resume, pci_dev);

	return drv && drv->resume ?
			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
}

/* Auxiliary functions used by the new power management framework */

static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
	pci_fixup_device(pci_fixup_resume, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
}

static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
	/* Disable non-bridge devices without PM support */
	if (!pci_has_subordinate(pci_dev))
		pci_disable_enabled_device(pci_dev);
}

static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
	struct pci_driver *drv = pci_dev->driver;
	bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
		|| drv->resume_early);

	/*
	 * Legacy PM support is used by default, so warn if the new framework is
	 * supported as well.  Drivers are supposed to support either the
	 * former, or the latter, but not both at the same time.
	 */
	WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
	     drv->name, pci_dev->vendor, pci_dev->device);

	return ret;
}

/* New power management framework */

static int pci_pm_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->prepare) {
		int error = drv->pm->prepare(dev);

		if (error < 0)
			return error;

		if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
			return 0;
	}
	return pci_dev_keep_suspended(to_pci_dev(dev));
}

static void pci_pm_complete(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_dev_complete_resume(pci_dev);
	pm_generic_complete(dev);

	/* Resume device if platform firmware has put it in reset-power-on */
	if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
		pci_power_t pre_sleep_state = pci_dev->current_state;

		pci_update_current_state(pci_dev, pci_dev->current_state);
		if (pci_dev->current_state < pre_sleep_state)
			pm_request_resume(dev);
	}
}

#else /* !CONFIG_PM_SLEEP */

#define pci_pm_prepare	NULL
#define pci_pm_complete	NULL

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND
static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev)
{
	/*
	 * Some BIOSes forget to clear Root PME Status bits after system
	 * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
	 * Clear those bits now just in case (shouldn't hurt).
	 */
	if (pci_is_pcie(pci_dev) &&
	    (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT ||
	     pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC))
		pcie_clear_root_pme_status(pci_dev);
}

static int pci_pm_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * PCI devices suspended at run time may need to be resumed at this
	 * point, because in general it may be necessary to reconfigure them for
	 * system suspend.  Namely, if the device is expected to wake up the
	 * system from the sleep state, it may have to be reconfigured for this
	 * purpose, or if the device is not expected to wake up the system from
	 * the sleep state, it should be prevented from signaling wakeup events
	 * going forward.
	 *
	 * Also if the driver of the device does not indicate that its system
	 * suspend callbacks can cope with runtime-suspended devices, it is
	 * better to resume the device from runtime suspend here.
	 */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    !pci_dev_keep_suspended(pci_dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	}

	if (pm->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend(dev);
		suspend_report_result(pm->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				  "PCI PM: State of device not saved by %pF\n",
				  pm->suspend);
		}
	}

	return 0;
}

static int pci_pm_suspend_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_suspend_late(dev);
}

static int pci_pm_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		dev->power.may_skip_resume = true;
		return 0;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_save_state(pci_dev);
		goto Fixup;
	}

	if (pm->suspend_noirq) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend_noirq(dev);
		suspend_report_result(pm->suspend_noirq, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				  "PCI PM: State of device not saved by %pF\n",
				  pm->suspend_noirq);
			goto Fixup;
		}
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		if (pci_power_manageable(pci_dev))
			pci_prepare_to_sleep(pci_dev);
	}

	dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
		pci_power_name(pci_dev->current_state));

	pci_pm_set_unknown_state(pci_dev);

	/*
	 * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
	 * PCI COMMAND register isn't 0, the BIOS assumes that the controller
	 * hasn't been quiesced and tries to turn it off.  If the controller
	 * is already in D3, this can hang or cause memory corruption.
	 *
	 * Since the value of the COMMAND register doesn't matter once the
	 * device has been suspended, we can safely set it to 0 here.
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	/*
	 * If the target system sleep state is suspend-to-idle, it is sufficient
	 * to check whether or not the device's wakeup settings are good for
	 * runtime PM.  Otherwise, the pm_resume_via_firmware() check will cause
	 * pci_pm_complete() to take care of fixing up the device's state
	 * anyway, if need be.
	 */
	dev->power.may_skip_resume = device_may_wakeup(dev) ||
					!device_can_wakeup(dev);

	return 0;
}

static int pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (dev_pm_may_skip_resume(dev))
		return 0;

	/*
	 * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
	 * during system suspend, so update their runtime PM status to "active"
	 * as they are going to be put into D0 shortly.
	 */
	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	pci_pm_default_resume_early(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	pcie_pme_root_status_cleanup(pci_dev);

	if (drv && drv->pm && drv->pm->resume_noirq)
		error = drv->pm->resume_noirq(dev);

	return error;
}

static int pci_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the suspend error path in which resume is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->resume)
			error = pm->resume(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_SUSPEND */

#define pci_pm_suspend		NULL
#define pci_pm_suspend_late	NULL
#define pci_pm_suspend_noirq	NULL
#define pci_pm_resume		NULL
#define pci_pm_resume_noirq	NULL

#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

/*
 * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
 * a hibernate transition
 */
struct dev_pm_ops __weak pcibios_pm_ops;

static int pci_pm_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_FREEZE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * This used to be done in pci_pm_prepare() for all devices and some
	 * drivers may depend on it, so do it here.  Ideally, runtime-suspended
	 * devices should not be touched during freeze/thaw transitions,
	 * however.
	 */
	if (!dev_pm_smart_suspend_and_suspended(dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	}

	if (pm->freeze) {
		int error;

		error = pm->freeze(dev);
		suspend_report_result(pm->freeze, error);
		if (error)
			return error;
	}

	return 0;
}

static int pci_pm_freeze_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	return pm_generic_freeze_late(dev);
}

static int pci_pm_freeze_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_FREEZE);

	if (drv && drv->pm && drv->pm->freeze_noirq) {
		int error;

		error = drv->pm->freeze_noirq(dev);
		suspend_report_result(drv->pm->freeze_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	if (pcibios_pm_ops.freeze_noirq)
		return pcibios_pm_ops.freeze_noirq(dev);

	return 0;
}

static int pci_pm_thaw_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	/*
	 * If the device is in runtime suspend, the code below may not work
	 * correctly with it, so skip that code and make the PM core skip all of
	 * the subsequent "thaw" callbacks for the device.
	 */
	if (dev_pm_smart_suspend_and_suspended(dev)) {
		dev_pm_skip_next_resume_phases(dev);
		return 0;
	}

	if (pcibios_pm_ops.thaw_noirq) {
		error = pcibios_pm_ops.thaw_noirq(dev);
		if (error)
			return error;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	/*
	 * pci_restore_state() requires the device to be in D0 (because of MSI
	 * restoration among other things), so force it into D0 in case the
	 * driver's "freeze" callbacks put it into a low-power state directly.
	 */
	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	if (drv && drv->pm && drv->pm->thaw_noirq)
		error = drv->pm->thaw_noirq(dev);

	return error;
}

static int pci_pm_thaw(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	if (pm) {
		if (pm->thaw)
			error = pm->thaw(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	pci_dev->state_saved = false;

	return error;
}

static int pci_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/* The reason to do that is the same as in pci_pm_suspend(). */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    !pci_dev_keep_suspended(pci_dev))
		pm_runtime_resume(dev);

	pci_dev->state_saved = false;
	if (pm->poweroff) {
		int error;

		error = pm->poweroff(dev);
		suspend_report_result(pm->poweroff, error);
		if (error)
			return error;
	}

	return 0;
}

static int pci_pm_poweroff_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_poweroff_late(dev);
}

static int pci_pm_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pci_has_legacy_pm_support(to_pci_dev(dev)))
		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);

	if (!drv || !drv->pm) {
		pci_fixup_device(pci_fixup_suspend_late, pci_dev);
		return 0;
	}

	if (drv->pm->poweroff_noirq) {
		int error;

		error = drv->pm->poweroff_noirq(dev);
		suspend_report_result(drv->pm->poweroff_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
		pci_prepare_to_sleep(pci_dev);

	/*
	 * The reason for doing this here is the same as for the analogous code
	 * in pci_pm_suspend_noirq().
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	if (pcibios_pm_ops.poweroff_noirq)
		return pcibios_pm_ops.poweroff_noirq(dev);

	return 0;
}

static int pci_pm_restore_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	/* This is analogous to the pci_pm_resume_noirq() case. */
	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	if (pcibios_pm_ops.restore_noirq) {
		error = pcibios_pm_ops.restore_noirq(dev);
		if (error)
			return error;
	}

	pci_pm_default_resume_early(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	if (drv && drv->pm && drv->pm->restore_noirq)
		error = drv->pm->restore_noirq(dev);

	return error;
}

static int pci_pm_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the hibernation error path in which restore is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->restore)
			error = pm->restore(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_HIBERNATE_CALLBACKS */

#define pci_pm_freeze		NULL
#define pci_pm_freeze_late	NULL
#define pci_pm_freeze_noirq	NULL
#define pci_pm_thaw		NULL
#define pci_pm_thaw_noirq	NULL
#define pci_pm_poweroff		NULL
#define pci_pm_poweroff_late	NULL
#define pci_pm_poweroff_noirq	NULL
#define pci_pm_restore		NULL
#define pci_pm_restore_noirq	NULL

#endif /* !CONFIG_HIBERNATE_CALLBACKS */

#ifdef CONFIG_PM

static int pci_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev = pci_dev->current_state;
	int error;

	/*
	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
	 * but it may go to D3cold when the bridge above it runtime suspends.
	 * Save its config space in case that happens.
	 */
	if (!pci_dev->driver) {
		pci_save_state(pci_dev);
		return 0;
	}

	if (!pm || !pm->runtime_suspend)
		return -ENOSYS;

	pci_dev->state_saved = false;
	error = pm->runtime_suspend(dev);
	if (error) {
		/*
		 * -EBUSY and -EAGAIN is used to request the runtime PM core
		 * to schedule a new suspend, so log the event only with debug
		 * log level.
		 */
		if (error == -EBUSY || error == -EAGAIN)
			dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
				pm->runtime_suspend, error);
		else
			dev_err(dev, "can't suspend (%pf returned %d)\n",
				pm->runtime_suspend, error);

		return error;
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
	    && pci_dev->current_state != PCI_UNKNOWN) {
		WARN_ONCE(pci_dev->current_state != prev,
			  "PCI PM: State of device not saved by %pF\n",
			  pm->runtime_suspend);
		return 0;
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		pci_finish_runtime_suspend(pci_dev);
	}

	return 0;
}

static int pci_pm_runtime_resume(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * Restoring config space is necessary even if the device is not bound
	 * to a driver because although we left it in D0, it may have gone to
	 * D3cold when the bridge above it runtime suspended.
	 */
	pci_restore_standard_config(pci_dev);

	if (!pci_dev->driver)
		return 0;

	if (!pm || !pm->runtime_resume)
		return -ENOSYS;

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
	pci_fixup_device(pci_fixup_resume, pci_dev);

	rc = pm->runtime_resume(dev);

	pci_dev->runtime_d3cold = false;

	return rc;
}

static int pci_pm_runtime_idle(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret = 0;

	/*
	 * If pci_dev->driver is not set (unbound), the device should
	 * always remain in D0 regardless of the runtime PM status
	 */
	if (!pci_dev->driver)
		return 0;

	if (!pm)
		return -ENOSYS;

	if (pm->runtime_idle)
		ret = pm->runtime_idle(dev);

	return ret;
}

static const struct dev_pm_ops pci_dev_pm_ops = {
	.prepare = pci_pm_prepare,
	.complete = pci_pm_complete,
	.suspend = pci_pm_suspend,
	.suspend_late = pci_pm_suspend_late,
	.resume = pci_pm_resume,
	.freeze = pci_pm_freeze,
	.freeze_late = pci_pm_freeze_late,
	.thaw = pci_pm_thaw,
	.poweroff = pci_pm_poweroff,
	.poweroff_late = pci_pm_poweroff_late,
	.restore = pci_pm_restore,
	.suspend_noirq = pci_pm_suspend_noirq,
	.resume_noirq = pci_pm_resume_noirq,
	.freeze_noirq = pci_pm_freeze_noirq,
	.thaw_noirq = pci_pm_thaw_noirq,
	.poweroff_noirq = pci_pm_poweroff_noirq,
	.restore_noirq = pci_pm_restore_noirq,
	.runtime_suspend = pci_pm_runtime_suspend,
	.runtime_resume = pci_pm_runtime_resume,
	.runtime_idle = pci_pm_runtime_idle,
};

#define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)

#else /* !CONFIG_PM */

#define pci_pm_runtime_suspend	NULL
#define pci_pm_runtime_resume	NULL
#define pci_pm_runtime_idle	NULL

#define PCI_PM_OPS_PTR	NULL

#endif /* !CONFIG_PM */

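/*
 * Illustrative sketch (not part of the original file): the pm callbacks
 * dereferenced above (dev->driver->pm) come from the dev_pm_ops that a PCI
 * driver installs via its embedded struct device_driver.  A hypothetical
 * "foo" driver using the new framework might do:
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *		.driver.pm	= &foo_pm_ops,
 *	};
 *
 * Drivers are expected to provide either these dev_pm_ops callbacks or the
 * legacy suspend/resume hooks in struct pci_driver, not both (see
 * pci_has_legacy_pm_support() above).
 */
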
/**
 * __pci_register_driver - register a new pci driver
 * @drv: the driver structure to register
 * @owner: owner module of drv
 * @mod_name: module name string
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns a negative value on error, otherwise 0.
 * If no error occurred, the driver remains registered even if
 * no device was claimed during registration.
 */
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
			  const char *mod_name)
{
	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &pci_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	drv->driver.groups = drv->groups;

	spin_lock_init(&drv->dynids.lock);
	INIT_LIST_HEAD(&drv->dynids.list);

	/* register with core */
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(__pci_register_driver);

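/*
 * Illustrative sketch (not part of the original file): drivers normally
 * reach __pci_register_driver() through the pci_register_driver() wrapper
 * macro, most often indirectly via module_pci_driver().  A minimal,
 * hypothetical "foo" driver might be registered like this:
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_driver);
 *
 * module_pci_driver() expands to module_init()/module_exit() handlers that
 * call pci_register_driver() and pci_unregister_driver() respectively.
 */
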
/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */
void pci_unregister_driver(struct pci_driver *drv)
{
	driver_unregister(&drv->driver);
	pci_free_dynids(drv);
}
EXPORT_SYMBOL(pci_unregister_driver);

static struct pci_driver pci_compat_driver = {
	.name = "compat"
};

/**
 * pci_dev_driver - get the pci_driver of a device
 * @dev: the device to query
 *
 * Returns the appropriate pci_driver structure or %NULL if there is no
 * registered driver for the device.
 */
struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
	if (dev->driver)
		return dev->driver;
	else {
		int i;

		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			if (dev->resource[i].flags & IORESOURCE_BUSY)
				return &pci_compat_driver;
	}
	return NULL;
}
EXPORT_SYMBOL(pci_dev_driver);

/**
 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
 * @dev: the PCI device structure to match against
 * @drv: the device driver to search for matching PCI device id structures
 *
 * Used by the driver core to check whether a PCI device present in the
 * system is in the driver's list of supported devices.  Returns 1 if a
 * matching pci_device_id structure is found, 0 otherwise.
 */
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *pci_drv;
	const struct pci_device_id *found_id;

	if (!pci_dev->match_driver)
		return 0;

	pci_drv = to_pci_driver(drv);
	found_id = pci_match_device(pci_drv, pci_dev);
	if (found_id)
		return 1;

	return 0;
}

/**
 * pci_dev_get - increments the reference count of the pci device structure
 * @dev: the device being referenced
 *
 * Each live reference to a device should be refcounted.
 *
 * Drivers for PCI devices should normally record such references in
 * their probe() methods, when they bind to a device, and release
 * them by calling pci_dev_put(), in their disconnect() methods.
 *
 * A pointer to the device with the incremented reference counter is returned.
 */
struct pci_dev *pci_dev_get(struct pci_dev *dev)
{
	if (dev)
		get_device(&dev->dev);
	return dev;
}
EXPORT_SYMBOL(pci_dev_get);

/**
 * pci_dev_put - release a use of the pci device structure
 * @dev: device that's been disconnected
 *
 * Must be called when a user of a device is finished with it.  When the last
 * user of the device calls this function, the memory of the device is freed.
 */
void pci_dev_put(struct pci_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(pci_dev_put);

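/*
 * Illustrative sketch (not part of the original file): code that holds on
 * to a pci_dev must keep the get/put calls balanced.  Lookup helpers such
 * as pci_get_device() already return a referenced device and drop the
 * reference on the device passed in as the starting point:
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pdev)))
 *		inspect(pdev);
 *
 * where inspect() stands for whatever per-device work the caller needs.
 * A reference taken explicitly with pci_dev_get() must be dropped with a
 * matching pci_dev_put() once the pointer is no longer used.
 */
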
static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct pci_dev *pdev;

	if (!dev)
		return -ENODEV;

	pdev = to_pci_dev(dev);

	if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
			   pdev->subsystem_device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
			   pdev->vendor, pdev->device,
			   pdev->subsystem_vendor, pdev->subsystem_device,
			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
			   (u8)(pdev->class)))
		return -ENOMEM;

	return 0;
}

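/*
 * Illustrative example (not part of the original file): for a hypothetical
 * device 8086:1533 at 0000:01:00.0 with subsystem 8086:0000 and class
 * 0x020000, the variables added above would come out as:
 *
 *	PCI_CLASS=20000
 *	PCI_ID=8086:1533
 *	PCI_SUBSYS_ID=8086:0000
 *	PCI_SLOT_NAME=0000:01:00.0
 *	MODALIAS=pci:v00008086d00001533sv00008086sd00000000bc02sc00i00
 *
 * udev and modprobe match the MODALIAS string against the patterns that
 * MODULE_DEVICE_TABLE(pci, ...) records in modules.alias.
 */
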
#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
/**
 * pci_uevent_ers - emit a uevent during recovery path of PCI device
 * @pdev: PCI device undergoing error recovery
 * @err_type: type of error event
 */
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
{
	int idx = 0;
	char *envp[3];

	switch (err_type) {
	case PCI_ERS_RESULT_NONE:
	case PCI_ERS_RESULT_CAN_RECOVER:
		envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	case PCI_ERS_RESULT_RECOVERED:
		envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=1";
		break;
	case PCI_ERS_RESULT_DISCONNECT:
		envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	default:
		break;
	}

	if (idx > 0) {
		envp[idx++] = NULL;
		kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
	}
}
#endif

static int pci_bus_num_vf(struct device *dev)
{
	return pci_num_vf(to_pci_dev(dev));
}

/**
 * pci_dma_configure - Setup DMA configuration
 * @dev: ptr to dev structure
 *
 * Function to update a PCI device's DMA configuration using the same
 * info from the OF node or ACPI node of the host bridge's parent (if any).
 */
static int pci_dma_configure(struct device *dev)
{
	struct device *bridge;
	int ret = 0;

	bridge = pci_get_host_bridge_device(to_pci_dev(dev));

	if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
	    bridge->parent->of_node) {
		ret = of_dma_configure(dev, bridge->parent->of_node, true);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
		enum dev_dma_attr attr = acpi_get_dma_attr(adev);

		if (attr != DEV_DMA_NOT_SUPPORTED)
			ret = acpi_dma_configure(dev, attr);
	}

	pci_put_host_bridge_device(bridge);
	return ret;
}

struct bus_type pci_bus_type = {
	.name		= "pci",
	.match		= pci_bus_match,
	.uevent		= pci_uevent,
	.probe		= pci_device_probe,
	.remove		= pci_device_remove,
	.shutdown	= pci_device_shutdown,
	.dev_groups	= pci_dev_groups,
	.bus_groups	= pci_bus_groups,
	.drv_groups	= pci_drv_groups,
	.pm		= PCI_PM_OPS_PTR,
	.num_vf		= pci_bus_num_vf,
	.dma_configure	= pci_dma_configure,
};
EXPORT_SYMBOL(pci_bus_type);

#ifdef CONFIG_PCIEPORTBUS
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pcie_device *pciedev;
	struct pcie_port_service_driver *driver;

	if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
		return 0;

	pciedev = to_pcie_device(dev);
	driver = to_service_driver(drv);

	if (driver->service != pciedev->service)
		return 0;

	if (driver->port_type != PCIE_ANY_PORT &&
	    driver->port_type != pci_pcie_type(pciedev->port))
		return 0;

	return 1;
}

struct bus_type pcie_port_bus_type = {
	.name		= "pci_express",
	.match		= pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
#endif

static int __init pci_driver_init(void)
{
	int ret;

	ret = bus_register(&pci_bus_type);
	if (ret)
		return ret;

#ifdef CONFIG_PCIEPORTBUS
	ret = bus_register(&pcie_port_bus_type);
	if (ret)
		return ret;
#endif
	dma_debug_add_bus(&pci_bus_type);
	return 0;
}
postcore_initcall(pci_driver_init);