pci-driver.c

// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2007 Novell Inc.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mempolicy.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/kexec.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
#include "pci.h"
#include "pcie/portdrv.h"

struct pci_dynid {
	struct list_head node;
	struct pci_device_id id;
};

/**
 * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
 * @drv: target pci driver
 * @vendor: PCI vendor ID
 * @device: PCI device ID
 * @subvendor: PCI subvendor ID
 * @subdevice: PCI subdevice ID
 * @class: PCI class
 * @class_mask: PCI class mask
 * @driver_data: private driver data
 *
 * Adds a new dynamic pci device ID to this driver and causes the
 * driver to probe for all devices again.  @drv must have been
 * registered prior to calling this function.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int pci_add_dynid(struct pci_driver *drv,
		  unsigned int vendor, unsigned int device,
		  unsigned int subvendor, unsigned int subdevice,
		  unsigned int class, unsigned int class_mask,
		  unsigned long driver_data)
{
	struct pci_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.vendor = vendor;
	dynid->id.device = device;
	dynid->id.subvendor = subvendor;
	dynid->id.subdevice = subdevice;
	dynid->id.class = class;
	dynid->id.class_mask = class_mask;
	dynid->id.driver_data = driver_data;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}
EXPORT_SYMBOL_GPL(pci_add_dynid);
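
/*
 * Illustrative sketch of a call site: a registered driver that wants to
 * claim an extra ID at run time could do something like
 *
 *	err = pci_add_dynid(&my_pci_driver, 0x8086, 0x1234,
 *			    PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0);
 *
 * where &my_pci_driver and the vendor/device values are placeholders only.
 * In practice, dynamic IDs usually arrive through the new_id sysfs
 * attribute below rather than from driver code.
 */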

static void pci_free_dynids(struct pci_driver *drv)
{
	struct pci_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/**
 * new_id_store - sysfs frontend to pci_add_dynid()
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Allow PCI IDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct pci_driver *pdrv = to_pci_driver(driver);
	const struct pci_device_id *ids = pdrv->id_table;
	__u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	unsigned long driver_data = 0;
	int fields = 0;
	int retval = 0;

	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask, &driver_data);
	if (fields < 2)
		return -EINVAL;

	if (fields != 7) {
		struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);

		if (!pdev)
			return -ENOMEM;

		pdev->vendor = vendor;
		pdev->device = device;
		pdev->subsystem_vendor = subvendor;
		pdev->subsystem_device = subdevice;
		pdev->class = class;

		if (pci_match_id(pdrv->id_table, pdev))
			retval = -EEXIST;

		kfree(pdev);

		if (retval)
			return retval;
	}

	/* Only accept driver_data values that match an existing id_table
	   entry */
	if (ids) {
		retval = -EINVAL;
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (driver_data == ids->driver_data) {
				retval = 0;
				break;
			}
			ids++;
		}
		if (retval)	/* No match */
			return retval;
	}

	retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
			       class, class_mask, driver_data);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
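
/*
 * Usage sketch for the new_id attribute: writing two to seven hex fields
 * (vendor, device, and optionally subvendor, subdevice, class, class_mask,
 * driver_data) binds additional hardware to an already-loaded driver, e.g.
 *
 *	# echo "8086 10f5" > /sys/bus/pci/drivers/<driver>/new_id
 *
 * The vendor/device pair above is only an example; <driver> stands for the
 * driver's sysfs name.
 */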

/**
 * remove_id_store - remove a PCI device ID from this driver
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Removes a dynamic pci device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct pci_dynid *dynid, *n;
	struct pci_driver *pdrv = to_pci_driver(driver);
	__u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	int fields = 0;
	size_t retval = -ENODEV;

	fields = sscanf(buf, "%x %x %x %x %x %x",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask);
	if (fields < 2)
		return -EINVAL;

	spin_lock(&pdrv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
		struct pci_device_id *id = &dynid->id;

		if ((id->vendor == vendor) &&
		    (id->device == device) &&
		    (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
		    (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
		    !((id->class ^ class) & class_mask)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&pdrv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *pci_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(pci_drv);

/**
 * pci_match_id - See if a pci device matches a given pci_id table
 * @ids: array of PCI device id structures to search in
 * @dev: the PCI device structure to match against.
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 *
 * Deprecated, don't use this as it will not catch any dynamic ids
 * that a driver might want to check for.
 */
const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
					 struct pci_dev *dev)
{
	if (ids) {
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (pci_match_one_device(ids, dev))
				return ids;
			ids++;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_match_id);
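
/*
 * Note: because pci_match_id() walks only the static table, a driver that
 * needs the matching entry is normally better served by the pci_device_id
 * pointer the PCI core passes to its ->probe() callback, which also covers
 * IDs added dynamically through new_id above.
 */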

static const struct pci_device_id pci_device_id_any = {
	.vendor = PCI_ANY_ID,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
};

/**
 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
 * @drv: the PCI driver to match against
 * @dev: the PCI device structure to match against
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 */
static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						    struct pci_dev *dev)
{
	struct pci_dynid *dynid;
	const struct pci_device_id *found_id = NULL;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (pci_match_one_device(&dynid->id, dev)) {
			found_id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	if (!found_id)
		found_id = pci_match_id(drv->id_table, dev);

	/* driver_override will always match, send a dummy id */
	if (!found_id && dev->driver_override)
		found_id = &pci_device_id_any;

	return found_id;
}

struct drv_dev_and_id {
	struct pci_driver *drv;
	struct pci_dev *dev;
	const struct pci_device_id *id;
};

static long local_pci_probe(void *_ddi)
{
	struct drv_dev_and_id *ddi = _ddi;
	struct pci_dev *pci_dev = ddi->dev;
	struct pci_driver *pci_drv = ddi->drv;
	struct device *dev = &pci_dev->dev;
	int rc;

	/*
	 * Unbound PCI devices are always put in D0, regardless of
	 * runtime PM status.  During probe, the device is set to
	 * active and the usage count is incremented.  If the driver
	 * supports runtime PM, it should call pm_runtime_put_noidle(),
	 * or any other runtime PM helper function decrementing the usage
	 * count, in its probe routine and pm_runtime_get_noresume() in
	 * its remove routine.
	 */
	pm_runtime_get_sync(dev);
	pci_dev->driver = pci_drv;
	rc = pci_drv->probe(pci_dev, ddi->id);
	if (!rc)
		return rc;
	if (rc < 0) {
		pci_dev->driver = NULL;
		pm_runtime_put_sync(dev);
		return rc;
	}
	/*
	 * Probe function should return < 0 for failure, 0 for success
	 * Treat values > 0 as success, but warn.
	 */
	dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
	return 0;
}
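
/*
 * Sketch of the driver-side counterpart to the runtime PM handling above,
 * for a hypothetical driver that supports runtime PM (names are
 * illustrative only):
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		...
 *		pm_runtime_put_noidle(&pdev->dev);	// drop the probe-time usage count
 *		return 0;
 *	}
 *
 *	static void my_remove(struct pci_dev *pdev)
 *	{
 *		pm_runtime_get_noresume(&pdev->dev);	// re-take it before unbinding
 *		...
 *	}
 */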

static bool pci_physfn_is_probed(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	return dev->is_virtfn && dev->physfn->is_probed;
#else
	return false;
#endif
}

static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	int error, node, cpu;
	struct drv_dev_and_id ddi = { drv, dev, id };

	/*
	 * Execute driver initialization on node where the device is
	 * attached.  This way the driver likely allocates its local memory
	 * on the right node.
	 */
	node = dev_to_node(&dev->dev);
	dev->is_probed = 1;

	cpu_hotplug_disable();

	/*
	 * Prevent nesting work_on_cpu() for the case where a Virtual Function
	 * device is probed from work_on_cpu() of the Physical device.
	 */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
	    pci_physfn_is_probed(dev))
		cpu = nr_cpu_ids;
	else
		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);

	if (cpu < nr_cpu_ids)
		error = work_on_cpu(cpu, local_pci_probe, &ddi);
	else
		error = local_pci_probe(&ddi);

	dev->is_probed = 0;
	cpu_hotplug_enable();
	return error;
}
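
/*
 * In short: if the device sits on an online NUMA node with at least one
 * online CPU, the probe runs via work_on_cpu() on a CPU of that node so
 * that node-local allocations made during probe land on the right node;
 * otherwise (no node information, or a VF probed from the PF's own
 * work_on_cpu() context) the probe simply runs on the current CPU.
 */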

/**
 * __pci_device_probe - check if a driver wants to claim a specific PCI device
 * @drv: driver to call to check if it wants the PCI device
 * @pci_dev: PCI device being probed
 *
 * returns 0 on success, else error.
 * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
 */
static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
	const struct pci_device_id *id;
	int error = 0;

	if (!pci_dev->driver && drv->probe) {
		error = -ENODEV;

		id = pci_match_device(drv, pci_dev);
		if (id)
			error = pci_call_probe(drv, pci_dev, id);
	}
	return error;
}

int __weak pcibios_alloc_irq(struct pci_dev *dev)
{
	return 0;
}

void __weak pcibios_free_irq(struct pci_dev *dev)
{
}

#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return true;
}
#endif

static int pci_device_probe(struct device *dev)
{
	int error;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = to_pci_driver(dev->driver);

	pci_assign_irq(pci_dev);

	error = pcibios_alloc_irq(pci_dev);
	if (error < 0)
		return error;

	pci_dev_get(pci_dev);
	if (pci_device_can_probe(pci_dev)) {
		error = __pci_device_probe(drv, pci_dev);
		if (error) {
			pcibios_free_irq(pci_dev);
			pci_dev_put(pci_dev);
		}
	}

	return error;
}

static int pci_device_remove(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv) {
		if (drv->remove) {
			pm_runtime_get_sync(dev);
			drv->remove(pci_dev);
			pm_runtime_put_noidle(dev);
		}
		pcibios_free_irq(pci_dev);
		pci_dev->driver = NULL;
	}

	/* Undo the runtime PM settings in local_pci_probe() */
	pm_runtime_put_sync(dev);

	/*
	 * If the device is still on, set the power state as "unknown",
	 * since it might change by the next time we load the driver.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;

	/*
	 * We would love to complain here if pci_dev->is_enabled is set, that
	 * the driver should have called pci_disable_device(), but the
	 * unfortunate fact is there are too many odd BIOS and bridge setups
	 * that don't like drivers doing that all of the time.
	 * Oh well, we can dream of sane hardware when we sleep, no matter how
	 * horrible the crap we have to deal with is when we are awake...
	 */
	pci_dev_put(pci_dev);
	return 0;
}

static void pci_device_shutdown(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pm_runtime_resume(dev);

	if (drv && drv->shutdown)
		drv->shutdown(pci_dev);

	/*
	 * If this is a kexec reboot, turn off Bus Master bit on the
	 * device to tell it to not continue to do DMA.  Don't touch
	 * devices in D3cold or unknown states.
	 * If it is not a kexec reboot, firmware will hit the PCI
	 * devices with big hammer and stop their DMA any way.
	 */
	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
		pci_clear_master(pci_dev);
}

#ifdef CONFIG_PM

/* Auxiliary functions used for system resume and run-time resume. */

/**
 * pci_restore_standard_config - restore standard config registers of PCI device
 * @pci_dev: PCI device to handle
 */
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
	pci_update_current_state(pci_dev, PCI_UNKNOWN);

	if (pci_dev->current_state != PCI_D0) {
		int error = pci_set_power_state(pci_dev, PCI_D0);
		if (error)
			return error;
	}

	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
	return 0;
}

#endif

#ifdef CONFIG_PM_SLEEP

static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
	pci_power_up(pci_dev);
	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
	pci_fixup_device(pci_fixup_resume_early, pci_dev);
}

/*
 * Default "suspend" method for devices that have no driver provided suspend,
 * or not even a driver at all (second part).
 */
static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
{
	/*
	 * mark its power state as "unknown", since we don't know if
	 * e.g. the BIOS will change its device state when we suspend.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;
}

/*
 * Default "resume" method for devices that have no driver provided resume,
 * or not even a driver at all (second part).
 */
static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
	int retval;

	/* if the device was enabled before suspend, reenable */
	retval = pci_reenable_device(pci_dev);
	/*
	 * if the device was busmaster before the suspend, make it busmaster
	 * again
	 */
	if (pci_dev->is_busmaster)
		pci_set_master(pci_dev);

	return retval;
}

static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend(pci_dev, state);
		suspend_report_result(drv->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: Device state not saved by %pF\n",
				drv->suspend);
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return 0;
}

static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend_late) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend_late(pci_dev, state);
		suspend_report_result(drv->suspend_late, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: Device state not saved by %pF\n",
				drv->suspend_late);
			goto Fixup;
		}
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	return 0;
}

static int pci_legacy_resume_early(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	return drv && drv->resume_early ?
			drv->resume_early(pci_dev) : 0;
}

static int pci_legacy_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pci_fixup_device(pci_fixup_resume, pci_dev);

	return drv && drv->resume ?
			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
}

/* Auxiliary functions used by the new power management framework */

static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
	pci_fixup_device(pci_fixup_resume, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
}

static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
	/* Disable non-bridge devices without PM support */
	if (!pci_has_subordinate(pci_dev))
		pci_disable_enabled_device(pci_dev);
}

static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
	struct pci_driver *drv = pci_dev->driver;
	bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
			   || drv->resume_early);

	/*
	 * Legacy PM support is used by default, so warn if the new framework is
	 * supported as well.  Drivers are supposed to support either the
	 * former, or the latter, but not both at the same time.
	 */
	WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
		drv->name, pci_dev->vendor, pci_dev->device);

	return ret;
}
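
/*
 * For reference, a driver using the "new" framework wires its callbacks
 * through struct dev_pm_ops rather than the legacy pci_driver hooks.  A
 * minimal sketch (all names below are illustrative):
 *
 *	static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
 *
 *	static struct pci_driver my_driver = {
 *		.name		= "my_driver",
 *		.id_table	= my_ids,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *		.driver.pm	= &my_pm_ops,
 *	};
 */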

/* New power management framework */

static int pci_pm_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->prepare) {
		int error = drv->pm->prepare(dev);
		if (error < 0)
			return error;

		if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
			return 0;
	}
	return pci_dev_keep_suspended(to_pci_dev(dev));
}

static void pci_pm_complete(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_dev_complete_resume(pci_dev);
	pm_generic_complete(dev);

	/* Resume device if platform firmware has put it in reset-power-on */
	if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
		pci_power_t pre_sleep_state = pci_dev->current_state;

		pci_update_current_state(pci_dev, pci_dev->current_state);
		if (pci_dev->current_state < pre_sleep_state)
			pm_request_resume(dev);
	}
}

#else /* !CONFIG_PM_SLEEP */

#define pci_pm_prepare	NULL
#define pci_pm_complete	NULL

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND
static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev)
{
	/*
	 * Some BIOSes forget to clear Root PME Status bits after system
	 * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
	 * Clear those bits now just in case (shouldn't hurt).
	 */
	if (pci_is_pcie(pci_dev) &&
	    (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT ||
	     pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC))
		pcie_clear_root_pme_status(pci_dev);
}

static int pci_pm_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * PCI devices suspended at run time may need to be resumed at this
	 * point, because in general it may be necessary to reconfigure them for
	 * system suspend.  Namely, if the device is expected to wake up the
	 * system from the sleep state, it may have to be reconfigured for this
	 * purpose, or if the device is not expected to wake up the system from
	 * the sleep state, it should be prevented from signaling wakeup events
	 * going forward.
	 *
	 * Also if the driver of the device does not indicate that its system
	 * suspend callbacks can cope with runtime-suspended devices, it is
	 * better to resume the device from runtime suspend here.
	 */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    !pci_dev_keep_suspended(pci_dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	}

	if (pm->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend(dev);
		suspend_report_result(pm->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: State of device not saved by %pF\n",
				pm->suspend);
		}
	}

	return 0;
}

static int pci_pm_suspend_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_suspend_late(dev);
}

static int pci_pm_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		dev->power.may_skip_resume = true;
		return 0;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_save_state(pci_dev);
		goto Fixup;
	}

	if (pm->suspend_noirq) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend_noirq(dev);
		suspend_report_result(pm->suspend_noirq, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: State of device not saved by %pF\n",
				pm->suspend_noirq);
			goto Fixup;
		}
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		if (pci_power_manageable(pci_dev))
			pci_prepare_to_sleep(pci_dev);
	}

	dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
		pci_power_name(pci_dev->current_state));

	pci_pm_set_unknown_state(pci_dev);

	/*
	 * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
	 * PCI COMMAND register isn't 0, the BIOS assumes that the controller
	 * hasn't been quiesced and tries to turn it off.  If the controller
	 * is already in D3, this can hang or cause memory corruption.
	 *
	 * Since the value of the COMMAND register doesn't matter once the
	 * device has been suspended, we can safely set it to 0 here.
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	/*
	 * If the target system sleep state is suspend-to-idle, it is sufficient
	 * to check whether or not the device's wakeup settings are good for
	 * runtime PM.  Otherwise, the pm_resume_via_firmware() check will cause
	 * pci_pm_complete() to take care of fixing up the device's state
	 * anyway, if need be.
	 */
	dev->power.may_skip_resume = device_may_wakeup(dev) ||
					!device_can_wakeup(dev);

	return 0;
}

static int pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (dev_pm_may_skip_resume(dev))
		return 0;

	/*
	 * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
	 * during system suspend, so update their runtime PM status to "active"
	 * as they are going to be put into D0 shortly.
	 */
	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	pci_pm_default_resume_early(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	pcie_pme_root_status_cleanup(pci_dev);

	if (drv && drv->pm && drv->pm->resume_noirq)
		error = drv->pm->resume_noirq(dev);

	return error;
}

static int pci_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the suspend error path in which resume is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->resume)
			error = pm->resume(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_SUSPEND */

#define pci_pm_suspend		NULL
#define pci_pm_suspend_late	NULL
#define pci_pm_suspend_noirq	NULL
#define pci_pm_resume		NULL
#define pci_pm_resume_noirq	NULL

#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

/*
 * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
 * a hibernate transition
 */
struct dev_pm_ops __weak pcibios_pm_ops;

static int pci_pm_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_FREEZE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * This used to be done in pci_pm_prepare() for all devices and some
	 * drivers may depend on it, so do it here.  Ideally, runtime-suspended
	 * devices should not be touched during freeze/thaw transitions,
	 * however.
	 */
	if (!dev_pm_smart_suspend_and_suspended(dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	}

	if (pm->freeze) {
		int error;

		error = pm->freeze(dev);
		suspend_report_result(pm->freeze, error);
		if (error)
			return error;
	}

	return 0;
}

static int pci_pm_freeze_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	return pm_generic_freeze_late(dev);
}

static int pci_pm_freeze_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_FREEZE);

	if (drv && drv->pm && drv->pm->freeze_noirq) {
		int error;

		error = drv->pm->freeze_noirq(dev);
		suspend_report_result(drv->pm->freeze_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	if (pcibios_pm_ops.freeze_noirq)
		return pcibios_pm_ops.freeze_noirq(dev);

	return 0;
}

static int pci_pm_thaw_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	/*
	 * If the device is in runtime suspend, the code below may not work
	 * correctly with it, so skip that code and make the PM core skip all of
	 * the subsequent "thaw" callbacks for the device.
	 */
	if (dev_pm_smart_suspend_and_suspended(dev)) {
		dev_pm_skip_next_resume_phases(dev);
		return 0;
	}

	if (pcibios_pm_ops.thaw_noirq) {
		error = pcibios_pm_ops.thaw_noirq(dev);
		if (error)
			return error;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	/*
	 * pci_restore_state() requires the device to be in D0 (because of MSI
	 * restoration among other things), so force it into D0 in case the
	 * driver's "freeze" callbacks put it into a low-power state directly.
	 */
	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	if (drv && drv->pm && drv->pm->thaw_noirq)
		error = drv->pm->thaw_noirq(dev);

	return error;
}

static int pci_pm_thaw(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	if (pm) {
		if (pm->thaw)
			error = pm->thaw(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	pci_dev->state_saved = false;

	return error;
}

static int pci_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/* The reason to do that is the same as in pci_pm_suspend(). */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    !pci_dev_keep_suspended(pci_dev))
		pm_runtime_resume(dev);

	pci_dev->state_saved = false;
	if (pm->poweroff) {
		int error;

		error = pm->poweroff(dev);
		suspend_report_result(pm->poweroff, error);
		if (error)
			return error;
	}

	return 0;
}

static int pci_pm_poweroff_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_poweroff_late(dev);
}

static int pci_pm_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pci_has_legacy_pm_support(to_pci_dev(dev)))
		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);

	if (!drv || !drv->pm) {
		pci_fixup_device(pci_fixup_suspend_late, pci_dev);
		return 0;
	}

	if (drv->pm->poweroff_noirq) {
		int error;

		error = drv->pm->poweroff_noirq(dev);
		suspend_report_result(drv->pm->poweroff_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
		pci_prepare_to_sleep(pci_dev);

	/*
	 * The reason for doing this here is the same as for the analogous code
	 * in pci_pm_suspend_noirq().
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	if (pcibios_pm_ops.poweroff_noirq)
		return pcibios_pm_ops.poweroff_noirq(dev);

	return 0;
}

static int pci_pm_restore_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	/* This is analogous to the pci_pm_resume_noirq() case. */
	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	if (pcibios_pm_ops.restore_noirq) {
		error = pcibios_pm_ops.restore_noirq(dev);
		if (error)
			return error;
	}

	pci_pm_default_resume_early(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	if (drv && drv->pm && drv->pm->restore_noirq)
		error = drv->pm->restore_noirq(dev);

	return error;
}

static int pci_pm_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the hibernation error path in which restore is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->restore)
			error = pm->restore(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_HIBERNATE_CALLBACKS */

#define pci_pm_freeze		NULL
#define pci_pm_freeze_late	NULL
#define pci_pm_freeze_noirq	NULL
#define pci_pm_thaw		NULL
#define pci_pm_thaw_noirq	NULL
#define pci_pm_poweroff		NULL
#define pci_pm_poweroff_late	NULL
#define pci_pm_poweroff_noirq	NULL
#define pci_pm_restore		NULL
#define pci_pm_restore_noirq	NULL

#endif /* !CONFIG_HIBERNATE_CALLBACKS */

#ifdef CONFIG_PM

static int pci_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev = pci_dev->current_state;
	int error;

	/*
	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
	 * but it may go to D3cold when the bridge above it runtime suspends.
	 * Save its config space in case that happens.
	 */
	if (!pci_dev->driver) {
		pci_save_state(pci_dev);
		return 0;
	}

	if (!pm || !pm->runtime_suspend)
		return -ENOSYS;

	pci_dev->state_saved = false;
	error = pm->runtime_suspend(dev);
	if (error) {
		/*
		 * -EBUSY and -EAGAIN are used to request the runtime PM core
		 * to schedule a new suspend, so log the event only with debug
		 * log level.
		 */
		if (error == -EBUSY || error == -EAGAIN)
			dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
				pm->runtime_suspend, error);
		else
			dev_err(dev, "can't suspend (%pf returned %d)\n",
				pm->runtime_suspend, error);

		return error;
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
	    && pci_dev->current_state != PCI_UNKNOWN) {
		WARN_ONCE(pci_dev->current_state != prev,
			"PCI PM: State of device not saved by %pF\n",
			pm->runtime_suspend);
		return 0;
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		pci_finish_runtime_suspend(pci_dev);
	}

	return 0;
}

static int pci_pm_runtime_resume(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * Restoring config space is necessary even if the device is not bound
	 * to a driver because although we left it in D0, it may have gone to
	 * D3cold when the bridge above it runtime suspended.
	 */
	pci_restore_standard_config(pci_dev);

	if (!pci_dev->driver)
		return 0;

	if (!pm || !pm->runtime_resume)
		return -ENOSYS;

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
	pci_fixup_device(pci_fixup_resume, pci_dev);

	rc = pm->runtime_resume(dev);

	pci_dev->runtime_d3cold = false;

	return rc;
}

static int pci_pm_runtime_idle(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret = 0;

	/*
	 * If pci_dev->driver is not set (unbound), the device should
	 * always remain in D0 regardless of the runtime PM status
	 */
	if (!pci_dev->driver)
		return 0;

	if (!pm)
		return -ENOSYS;

	if (pm->runtime_idle)
		ret = pm->runtime_idle(dev);

	return ret;
}

static const struct dev_pm_ops pci_dev_pm_ops = {
	.prepare = pci_pm_prepare,
	.complete = pci_pm_complete,
	.suspend = pci_pm_suspend,
	.suspend_late = pci_pm_suspend_late,
	.resume = pci_pm_resume,
	.freeze = pci_pm_freeze,
	.freeze_late = pci_pm_freeze_late,
	.thaw = pci_pm_thaw,
	.poweroff = pci_pm_poweroff,
	.poweroff_late = pci_pm_poweroff_late,
	.restore = pci_pm_restore,
	.suspend_noirq = pci_pm_suspend_noirq,
	.resume_noirq = pci_pm_resume_noirq,
	.freeze_noirq = pci_pm_freeze_noirq,
	.thaw_noirq = pci_pm_thaw_noirq,
	.poweroff_noirq = pci_pm_poweroff_noirq,
	.restore_noirq = pci_pm_restore_noirq,
	.runtime_suspend = pci_pm_runtime_suspend,
	.runtime_resume = pci_pm_runtime_resume,
	.runtime_idle = pci_pm_runtime_idle,
};

#define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)
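
/*
 * These bus-level operations are what the PM core actually invokes for PCI
 * devices; each one performs the PCI-specific bookkeeping (config-space
 * save/restore, power-state transitions, fixups) around the corresponding
 * driver callback, so individual drivers only supply dev_pm_ops and never
 * touch this table directly.
 */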

#else /* !CONFIG_PM */

#define pci_pm_runtime_suspend	NULL
#define pci_pm_runtime_resume	NULL
#define pci_pm_runtime_idle	NULL

#define PCI_PM_OPS_PTR	NULL

#endif /* !CONFIG_PM */

/**
 * __pci_register_driver - register a new pci driver
 * @drv: the driver structure to register
 * @owner: owner module of drv
 * @mod_name: module name string
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns a negative value on error, otherwise 0.
 * If no error occurred, the driver remains registered even if
 * no device was claimed during registration.
 */
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
			  const char *mod_name)
{
	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &pci_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	drv->driver.groups = drv->groups;

	spin_lock_init(&drv->dynids.lock);
	INIT_LIST_HEAD(&drv->dynids.list);

	/* register with core */
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(__pci_register_driver);
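
/*
 * Drivers normally reach __pci_register_driver() through the
 * pci_register_driver() wrapper, which fills in THIS_MODULE and
 * KBUILD_MODNAME, or through module_pci_driver(), which additionally
 * generates the module init/exit boilerplate, e.g.
 *
 *	module_pci_driver(my_driver);
 *
 * ("my_driver" being an illustrative struct pci_driver.)
 */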

/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */
void pci_unregister_driver(struct pci_driver *drv)
{
	driver_unregister(&drv->driver);
	pci_free_dynids(drv);
}
EXPORT_SYMBOL(pci_unregister_driver);

static struct pci_driver pci_compat_driver = {
	.name = "compat"
};

/**
 * pci_dev_driver - get the pci_driver of a device
 * @dev: the device to query
 *
 * Returns the appropriate pci_driver structure or %NULL if there is no
 * registered driver for the device.
 */
struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
	if (dev->driver)
		return dev->driver;
	else {
		int i;
		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			if (dev->resource[i].flags & IORESOURCE_BUSY)
				return &pci_compat_driver;
	}
	return NULL;
}
EXPORT_SYMBOL(pci_dev_driver);

/**
 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
 * @dev: the PCI device structure to match against
 * @drv: the device driver to search for matching PCI device id structures
 *
 * Used by the PCI core to check whether a PCI device present in the
 * system matches one of the IDs supported by @drv.  Returns 1 if a
 * matching ID (static, dynamic, or driver_override) is found, 0 otherwise.
 */
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *pci_drv;
	const struct pci_device_id *found_id;

	if (!pci_dev->match_driver)
		return 0;

	pci_drv = to_pci_driver(drv);
	found_id = pci_match_device(pci_drv, pci_dev);
	if (found_id)
		return 1;

	return 0;
}

/**
 * pci_dev_get - increments the reference count of the pci device structure
 * @dev: the device being referenced
 *
 * Each live reference to a device should be refcounted.
 *
 * Drivers for PCI devices should normally record such references in
 * their probe() methods, when they bind to a device, and release
 * them by calling pci_dev_put(), in their disconnect() methods.
 *
 * A pointer to the device with the incremented reference counter is returned.
 */
struct pci_dev *pci_dev_get(struct pci_dev *dev)
{
	if (dev)
		get_device(&dev->dev);
	return dev;
}
EXPORT_SYMBOL(pci_dev_get);

/**
 * pci_dev_put - release a use of the pci device structure
 * @dev: device that's been disconnected
 *
 * Must be called when a user of a device is finished with it.  When the last
 * user of the device calls this function, the memory of the device is freed.
 */
void pci_dev_put(struct pci_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(pci_dev_put);

static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct pci_dev *pdev;

	if (!dev)
		return -ENODEV;

	pdev = to_pci_dev(dev);

	if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
			   pdev->subsystem_device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
			   pdev->vendor, pdev->device,
			   pdev->subsystem_vendor, pdev->subsystem_device,
			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
			   (u8)(pdev->class)))
		return -ENOMEM;

	return 0;
}
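
/*
 * The MODALIAS string produced above is what userspace (udev/kmod) matches
 * against the alias patterns that MODULE_DEVICE_TABLE(pci, ...) embeds in
 * modules, which is how the right PCI driver module gets loaded
 * automatically when a device appears.
 */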

#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
/**
 * pci_uevent_ers - emit a uevent during recovery path of PCI device
 * @pdev: PCI device undergoing error recovery
 * @err_type: type of error event
 */
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
{
	int idx = 0;
	char *envp[3];

	switch (err_type) {
	case PCI_ERS_RESULT_NONE:
	case PCI_ERS_RESULT_CAN_RECOVER:
		envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	case PCI_ERS_RESULT_RECOVERED:
		envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=1";
		break;
	case PCI_ERS_RESULT_DISCONNECT:
		envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	default:
		break;
	}

	if (idx > 0) {
		envp[idx++] = NULL;
		kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
	}
}
#endif

static int pci_bus_num_vf(struct device *dev)
{
	return pci_num_vf(to_pci_dev(dev));
}

/**
 * pci_dma_configure - Setup DMA configuration
 * @dev: ptr to dev structure
 *
 * Function to update a PCI device's DMA configuration using the same
 * info from the OF node or ACPI node of the host bridge's parent (if any).
 */
static int pci_dma_configure(struct device *dev)
{
	struct device *bridge;
	int ret = 0;

	bridge = pci_get_host_bridge_device(to_pci_dev(dev));

	if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
	    bridge->parent->of_node) {
		ret = of_dma_configure(dev, bridge->parent->of_node, true);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
		enum dev_dma_attr attr = acpi_get_dma_attr(adev);

		if (attr != DEV_DMA_NOT_SUPPORTED)
			ret = acpi_dma_configure(dev, attr);
	}

	pci_put_host_bridge_device(bridge);
	return ret;
}
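
/*
 * The DMA attributes here (coherency, addressing limits, any IOMMU binding)
 * are taken from the host bridge's parent rather than from the device
 * itself: via of_dma_configure() when the bridge was described in the
 * device tree, or via acpi_dma_configure() when it has an ACPI companion.
 */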

struct bus_type pci_bus_type = {
	.name		= "pci",
	.match		= pci_bus_match,
	.uevent		= pci_uevent,
	.probe		= pci_device_probe,
	.remove		= pci_device_remove,
	.shutdown	= pci_device_shutdown,
	.dev_groups	= pci_dev_groups,
	.bus_groups	= pci_bus_groups,
	.drv_groups	= pci_drv_groups,
	.pm		= PCI_PM_OPS_PTR,
	.num_vf		= pci_bus_num_vf,
	.dma_configure	= pci_dma_configure,
};
EXPORT_SYMBOL(pci_bus_type);

#ifdef CONFIG_PCIEPORTBUS
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pcie_device *pciedev;
	struct pcie_port_service_driver *driver;

	if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
		return 0;

	pciedev = to_pcie_device(dev);
	driver = to_service_driver(drv);

	if (driver->service != pciedev->service)
		return 0;

	if (driver->port_type != PCIE_ANY_PORT &&
	    driver->port_type != pci_pcie_type(pciedev->port))
		return 0;

	return 1;
}

struct bus_type pcie_port_bus_type = {
	.name		= "pci_express",
	.match		= pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
#endif

static int __init pci_driver_init(void)
{
	int ret;

	ret = bus_register(&pci_bus_type);
	if (ret)
		return ret;

#ifdef CONFIG_PCIEPORTBUS
	ret = bus_register(&pcie_port_bus_type);
	if (ret)
		return ret;
#endif

	return 0;
}
postcore_initcall(pci_driver_init);