access.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */
#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#ifdef CONFIG_PCI_LOCKLESS_CONFIG
# define pci_lock_config(f)	do { (void)(f); } while (0)
# define pci_unlock_config(f)	do { (void)(f); } while (0)
#else
# define pci_lock_config(f)	raw_spin_lock_irqsave(&pci_lock, f)
# define pci_unlock_config(f)	raw_spin_unlock_irqrestore(&pci_lock, f)
#endif

#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
	int res; \
	unsigned long flags; \
	u32 data = 0; \
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
	pci_lock_config(flags); \
	res = bus->ops->read(bus, devfn, pos, len, &data); \
	*value = (type)data; \
	pci_unlock_config(flags); \
	return res; \
}

#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
	int res; \
	unsigned long flags; \
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
	pci_lock_config(flags); \
	res = bus->ops->write(bus, devfn, pos, len, value); \
	pci_unlock_config(flags); \
	return res; \
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
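
/*
 * Illustrative sketch (not part of this file): code that only has a
 * (bus, devfn) pair, e.g. during early enumeration, uses the bus-level
 * wrappers above directly.  "bus" and "devfn" are assumed to come from
 * the caller's context:
 *
 *	u8 hdr_type;
 *
 *	if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
 *		return;		// a PCIBIOS_* error from the low-level op
 */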

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 1)
		writeb(val, addr);
	else if (size == 2)
		writew(val, addr);
	else
		writel(val, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);
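
/*
 * Illustrative sketch (not part of this file): a host controller driver
 * with memory-mapped (ECAM-style) config space typically supplies only a
 * map_bus() callback and reuses the generic accessors above.  The names
 * "my_map_bus" and "my_pci_ops" are hypothetical:
 *
 *	static void __iomem *my_map_bus(struct pci_bus *bus,
 *					unsigned int devfn, int where)
 *	{
 *		// return a pointer into the controller's config window,
 *		// or NULL if the device does not exist
 *	}
 *
 *	static struct pci_ops my_pci_ops = {
 *		.map_bus = my_map_bus,
 *		.read    = pci_generic_config_read,
 *		.write   = pci_generic_config_write,
 *	};
 */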

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*val = readl(addr);

	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	/*
	 * In general, hardware that supports only 32-bit writes on PCI is
	 * not spec-compliant.  For example, software may perform a 16-bit
	 * write.  If the hardware only supports 32-bit accesses, we must
	 * do a 32-bit read, merge in the 16 bits we intend to write,
	 * followed by a 32-bit write.  If the 16 bits we *don't* intend to
	 * write happen to have any RW1C (write-one-to-clear) bits set, we
	 * just inadvertently cleared something we shouldn't have.
	 */
	dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
			     size, pci_domain_nr(bus), bus->number,
			     PCI_SLOT(devfn), PCI_FUNC(devfn), where);

	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);
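
/*
 * Worked example of the merge above (illustrative, values assumed): a
 * 2-byte write of 0xBEEF to offset 0x06 lands in the upper half of the
 * aligned dword at 0x04, so:
 *
 *	size = 2, where & 0x3 = 2
 *	mask = ~(0xffff << 16)      = 0x0000ffff
 *	tmp  = readl(addr) & mask     keeps bytes 0x04-0x05 unchanged
 *	tmp |= 0xBEEF << 16           inserts bytes 0x06-0x07
 *
 * The read-modify-write is exactly what makes any RW1C status bits living
 * in bytes 0x04-0x05 vulnerable to being cleared unintentionally.
 */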

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type) \
int pci_user_read_config_##size \
	(struct pci_dev *dev, int pos, type *val) \
{ \
	int ret = PCIBIOS_SUCCESSFUL; \
	u32 data = -1; \
	if (PCI_##size##_BAD) \
		return -EINVAL; \
	raw_spin_lock_irq(&pci_lock); \
	if (unlikely(dev->block_cfg_access)) \
		pci_wait_cfg(dev); \
	ret = dev->bus->ops->read(dev->bus, dev->devfn, \
				  pos, sizeof(type), &data); \
	raw_spin_unlock_irq(&pci_lock); \
	*val = (type)data; \
	return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type) \
int pci_user_write_config_##size \
	(struct pci_dev *dev, int pos, type val) \
{ \
	int ret = PCIBIOS_SUCCESSFUL; \
	if (PCI_##size##_BAD) \
		return -EINVAL; \
	raw_spin_lock_irq(&pci_lock); \
	if (unlikely(dev->block_cfg_access)) \
		pci_wait_cfg(dev); \
	ret = dev->bus->ops->write(dev->bus, dev->devfn, \
				   pos, sizeof(type), val); \
	raw_spin_unlock_irq(&pci_lock); \
	return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
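
/*
 * Illustrative sketch (not part of this file): the "user" accessors above
 * are intended for paths that mirror userspace-initiated access (e.g.
 * sysfs/proc config reads) and therefore honour block_cfg_access.  Unlike
 * the pci_bus_*() wrappers, they return negative errnos:
 *
 *	u32 id;
 *	int err = pci_user_read_config_dword(dev, PCI_VENDOR_ID, &id);
 *
 *	if (err)	// e.g. -ENODEV via pcibios_err_to_errno()
 *		return err;
 */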

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise.  This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/*
	 * This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above.
	 */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
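
/*
 * Illustrative sketch (not part of this file): a typical caller brackets an
 * operation during which config space must not be touched, such as a reset
 * or a D-state transition, with the lock/unlock pair:
 *
 *	pci_cfg_access_lock(dev);
 *	// ... perform the reset / power transition ...
 *	pci_cfg_access_unlock(dev);
 *
 * pci_cfg_access_trylock() is the non-sleeping variant for atomic context;
 * the caller must skip or defer the operation when it returns false.
 */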

static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static bool pcie_downstream_port(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_UPSTREAM ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
	return pcie_downstream_port(dev) &&
	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails; it may
		 * have been written as 0xFFFF if a hardware error happened
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails; it may
		 * have been written as 0xFFFFFFFF if a hardware error happened
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set)
{
	int ret;
	u16 val;

	ret = pcie_capability_read_word(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_word(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set)
{
	int ret;
	u32 val;

	ret = pcie_capability_read_dword(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_dword(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
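
/*
 * Illustrative sketch (not part of this file): a driver that wants to flip
 * bits in the PCI Express Capability, e.g. disable relaxed ordering in
 * Device Control, can use the read-modify-write helper above:
 *
 *	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
 *					   PCI_EXP_DEVCTL_RELAX_EN, 0);
 *
 * The helpers hide the capability offset (pci_pcie_cap()) and quietly do
 * nothing for registers the Function does not implement.
 */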

int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
{
	if (pci_dev_is_disconnected(dev)) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}
	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_byte);

int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
{
	if (pci_dev_is_disconnected(dev)) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}
	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_word);

int pci_read_config_dword(const struct pci_dev *dev, int where,
			  u32 *val)
{
	if (pci_dev_is_disconnected(dev)) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}
	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_dword);

int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
{
	if (pci_dev_is_disconnected(dev))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_byte);

int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
{
	if (pci_dev_is_disconnected(dev))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_word);

int pci_write_config_dword(const struct pci_dev *dev, int where,
			   u32 val)
{
	if (pci_dev_is_disconnected(dev))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);
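
/*
 * Illustrative sketch (not part of this file): the struct pci_dev based
 * wrappers above are what ordinary drivers call.  Because reads return
 * all-ones when the device is disconnected or not responding, callers
 * often sanity-check the value as well as the return code:
 *
 *	u16 vendor;
 *
 *	if (pci_read_config_word(dev, PCI_VENDOR_ID, &vendor) ||
 *	    vendor == 0xffff)
 *		return;		// device gone, or config read failed
 */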