/* drivers/pci/access.c — PCI configuration space, VPD, and PCIe capability accessors */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/delay.h>
  3. #include <linux/pci.h>
  4. #include <linux/module.h>
  5. #include <linux/sched/signal.h>
  6. #include <linux/slab.h>
  7. #include <linux/ioport.h>
  8. #include <linux/wait.h>
  9. #include "pci.h"
/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.  It is a raw spinlock because config accessors
 * are used from contexts where sleeping is not allowed.
 */
DEFINE_RAW_SPINLOCK(pci_lock);
/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 *
 * The PCI_<width>_BAD macros expand to an alignment test on the caller's
 * "pos" variable at the expansion site: word accesses must be 2-byte
 * aligned, dword accesses 4-byte aligned.
 */
#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#ifdef CONFIG_PCI_LOCKLESS_CONFIG
/* Config accesses are inherently safe on this platform; elide the lock. */
# define pci_lock_config(f)	do { (void)(f); } while (0)
# define pci_unlock_config(f)	do { (void)(f); } while (0)
#else
# define pci_lock_config(f)	raw_spin_lock_irqsave(&pci_lock, f)
# define pci_unlock_config(f)	raw_spin_unlock_irqrestore(&pci_lock, f)
#endif
/*
 * PCI_OP_READ() - generate pci_bus_read_config_{byte,word,dword}().
 *
 * Each generated function checks register alignment, takes pci_lock
 * (unless lockless config is enabled), and forwards to bus->ops->read.
 * "data" is pre-zeroed so *value is always well defined, even when the
 * low-level read fails; the low-level return code is passed through
 * unchanged (PCIBIOS_* values).
 */
#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	pci_lock_config(flags);						\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	pci_unlock_config(flags);					\
	return res;							\
}
/*
 * PCI_OP_WRITE() - generate pci_bus_write_config_{byte,word,dword}().
 *
 * Mirrors PCI_OP_READ(): alignment check, locking, and forwarding of the
 * low-level bus->ops->write return code (PCIBIOS_* values).
 */
#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	pci_lock_config(flags);						\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	pci_unlock_config(flags);					\
	return res;							\
}
/* Instantiate and export the byte/word/dword bus config accessors. */
PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
  68. int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
  69. int where, int size, u32 *val)
  70. {
  71. void __iomem *addr;
  72. addr = bus->ops->map_bus(bus, devfn, where);
  73. if (!addr) {
  74. *val = ~0;
  75. return PCIBIOS_DEVICE_NOT_FOUND;
  76. }
  77. if (size == 1)
  78. *val = readb(addr);
  79. else if (size == 2)
  80. *val = readw(addr);
  81. else
  82. *val = readl(addr);
  83. return PCIBIOS_SUCCESSFUL;
  84. }
  85. EXPORT_SYMBOL_GPL(pci_generic_config_read);
  86. int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
  87. int where, int size, u32 val)
  88. {
  89. void __iomem *addr;
  90. addr = bus->ops->map_bus(bus, devfn, where);
  91. if (!addr)
  92. return PCIBIOS_DEVICE_NOT_FOUND;
  93. if (size == 1)
  94. writeb(val, addr);
  95. else if (size == 2)
  96. writew(val, addr);
  97. else
  98. writel(val, addr);
  99. return PCIBIOS_SUCCESSFUL;
  100. }
  101. EXPORT_SYMBOL_GPL(pci_generic_config_write);
  102. int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
  103. int where, int size, u32 *val)
  104. {
  105. void __iomem *addr;
  106. addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
  107. if (!addr) {
  108. *val = ~0;
  109. return PCIBIOS_DEVICE_NOT_FOUND;
  110. }
  111. *val = readl(addr);
  112. if (size <= 2)
  113. *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
  114. return PCIBIOS_SUCCESSFUL;
  115. }
  116. EXPORT_SYMBOL_GPL(pci_generic_config_read32);
/*
 * pci_generic_config_write32 - config write for hardware limited to
 * 32-bit accesses
 *
 * A full dword write goes straight out.  Sub-dword writes are emulated
 * with a read-modify-write of the containing dword, which can clobber
 * RW1C bits outside the intended bytes -- hence the rate-limited warning.
 */
int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	/*
	 * In general, hardware that supports only 32-bit writes on PCI is
	 * not spec-compliant.  For example, software may perform a 16-bit
	 * write.  If the hardware only supports 32-bit accesses, we must
	 * do a 32-bit read, merge in the 16 bits we intend to write,
	 * followed by a 32-bit write.  If the 16 bits we *don't* intend to
	 * write happen to have any RW1C (write-one-to-clear) bits set, we
	 * just inadvertently cleared something we shouldn't have.
	 */
	dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
			     size, pci_domain_nr(bus), bus->number,
			     PCI_SLOT(devfn), PCI_FUNC(devfn), where);

	/* Clear the target bytes, then merge in the new value, shifted into place. */
	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);
  148. /**
  149. * pci_bus_set_ops - Set raw operations of pci bus
  150. * @bus: pci bus struct
  151. * @ops: new raw operations
  152. *
  153. * Return previous raw operations
  154. */
  155. struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
  156. {
  157. struct pci_ops *old_ops;
  158. unsigned long flags;
  159. raw_spin_lock_irqsave(&pci_lock, flags);
  160. old_ops = bus->ops;
  161. bus->ops = ops;
  162. raw_spin_unlock_irqrestore(&pci_lock, flags);
  163. return old_ops;
  164. }
  165. EXPORT_SYMBOL(pci_bus_set_ops);
/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

/*
 * Sleep until dev->block_cfg_access clears.
 *
 * Caller must hold pci_lock with IRQs disabled (raw_spin_lock_irq).
 * The lock is dropped around schedule() and reacquired before the flag
 * is rechecked, so it is held again on return.  Wakeups come from
 * wake_up_all(&pci_cfg_wait) in pci_cfg_access_unlock().
 */
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);		/* allow unblocker to run */
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}
/*
 * PCI_USER_READ_CONFIG() - generate pci_user_read_config_{byte,word,dword}().
 *
 * Userspace-facing variant: honors dev->block_cfg_access by sleeping in
 * pci_wait_cfg() until the device is unblocked, and converts the PCIBIOS_*
 * result to a negative errno.  "data" is preset to all ones, so *val reads
 * as ~0 (truncated to the access width) if the low-level read leaves it
 * untouched.
 *
 * Returns 0 on success, negative values indicate error.
 */
#define PCI_USER_READ_CONFIG(size, type)	\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	raw_spin_unlock_irq(&pci_lock);					\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
/*
 * PCI_USER_WRITE_CONFIG() - generate pci_user_write_config_{byte,word,dword}().
 *
 * Mirrors PCI_USER_READ_CONFIG(): waits while config access is blocked
 * and converts the PCIBIOS_* result to a negative errno.
 *
 * Returns 0 on success, negative values indicate error.
 */
#define PCI_USER_WRITE_CONFIG(size, type)	\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	raw_spin_unlock_irq(&pci_lock);					\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
/* Instantiate the userspace byte/word/dword config accessors. */
PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
  229. /* VPD access through PCI 2.2+ VPD capability */
  230. /**
  231. * pci_read_vpd - Read one entry from Vital Product Data
  232. * @dev: pci device struct
  233. * @pos: offset in vpd space
  234. * @count: number of bytes to read
  235. * @buf: pointer to where to store result
  236. */
  237. ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
  238. {
  239. if (!dev->vpd || !dev->vpd->ops)
  240. return -ENODEV;
  241. return dev->vpd->ops->read(dev, pos, count, buf);
  242. }
  243. EXPORT_SYMBOL(pci_read_vpd);
  244. /**
  245. * pci_write_vpd - Write entry to Vital Product Data
  246. * @dev: pci device struct
  247. * @pos: offset in vpd space
  248. * @count: number of bytes to write
  249. * @buf: buffer containing write data
  250. */
  251. ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
  252. {
  253. if (!dev->vpd || !dev->vpd->ops)
  254. return -ENODEV;
  255. return dev->vpd->ops->write(dev, pos, count, buf);
  256. }
  257. EXPORT_SYMBOL(pci_write_vpd);
  258. /**
  259. * pci_set_vpd_size - Set size of Vital Product Data space
  260. * @dev: pci device struct
  261. * @len: size of vpd space
  262. */
  263. int pci_set_vpd_size(struct pci_dev *dev, size_t len)
  264. {
  265. if (!dev->vpd || !dev->vpd->ops)
  266. return -ENODEV;
  267. return dev->vpd->ops->set_size(dev, len);
  268. }
  269. EXPORT_SYMBOL(pci_set_vpd_size);
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev: pci device struct
 * @old_size: current assumed size, also maximum allowed size
 *
 * Walks the VPD resource list one tag at a time until the end tag is
 * found, and returns the offset just past it.  Returns 0 when the data
 * is malformed (unknown tag, short read of a large-tag length field, or
 * no end tag before @old_size).
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
	size_t off = 0;
	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */

	while (off < old_size &&
	       pci_read_vpd(dev, off, 1, header) == 1) {
		unsigned char tag;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			tag = pci_vpd_lrdt_tag(header);
			/* Only read length from known tag items */
			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
			    (tag == PCI_VPD_LTIN_RO_DATA) ||
			    (tag == PCI_VPD_LTIN_RW_DATA)) {
				if (pci_read_vpd(dev, off+1, 2,
						 &header[1]) != 2) {
					pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
						 tag, off + 1);
					return 0;
				}
				/* Skip tag byte, 16-bit length, and payload. */
				off += PCI_VPD_LRDT_TAG_SIZE +
					pci_vpd_lrdt_size(header);
			}
		} else {
			/* Short Resource Data Type Tag */
			off += PCI_VPD_SRDT_TAG_SIZE +
				pci_vpd_srdt_size(header);
			tag = pci_vpd_srdt_tag(header);
		}

		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
			return off;

		/* Anything other than ID string, RO data, or RW data is invalid. */
		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
		    (tag != PCI_VPD_LTIN_RO_DATA) &&
		    (tag != PCI_VPD_LTIN_RW_DATA)) {
			pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
				 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
				 tag, off);
			return 0;
		}
	}
	return 0;
}
/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware.  Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Polls PCI_VPD_ADDR until the F flag matches vpd->flag (set by the
 * read/write initiator), with an exponential backoff capped at ~1 ms per
 * poll and a 125 ms overall timeout.  A pending fatal signal aborts the
 * wait with -EINTR.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev)
{
	struct pci_vpd *vpd = dev->vpd;
	unsigned long timeout = jiffies + msecs_to_jiffies(125);
	unsigned long max_sleep = 16;
	u16 status;
	int ret;

	if (!vpd->busy)		/* no operation in flight */
		return 0;

	while (time_before(jiffies, timeout)) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = 0;
			return 0;
		}

		if (fatal_signal_pending(current))
			return -EINTR;

		usleep_range(10, max_sleep);
		if (max_sleep < 1024)
			max_sleep *= 2;	/* back off, device is slow */
	}

	pci_warn(dev, "VPD access failed.  This is likely a firmware bug on this device.  Contact the card vendor for a firmware update\n");
	return -ETIMEDOUT;
}
/*
 * pci_vpd_read - read @count bytes of VPD starting at @pos into @arg.
 *
 * Lazily sizes the VPD on first access, clamps the read to vpd->len,
 * then reads one aligned dword at a time through the PCI_VPD_ADDR /
 * PCI_VPD_DATA register pair, copying only the bytes inside [pos, end).
 * Returns the number of bytes read, or a negative errno.
 */
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
			    void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0)
		return -EINVAL;

	/* First access: determine the real VPD size. */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (pos > vpd->len)
		return 0;

	/* Clamp the request to the end of the VPD area. */
	if (end > vpd->len) {
		end = vpd->len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Make sure any previous operation has finished. */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		/* Kick off a read of the dword containing pos (F flag clear). */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = 1;
		vpd->flag = PCI_VPD_ADDR_F;	/* completion: F flag set */
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		/* Copy the wanted bytes out of the dword, LSB first. */
		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
/*
 * pci_vpd_write - write @count bytes from @arg to VPD starting at @pos.
 *
 * Both @pos and @count must be dword-aligned; the write must fit inside
 * vpd->len (lazily determined on first access).  Each dword is written
 * through PCI_VPD_DATA, then committed by writing the address with
 * PCI_VPD_ADDR_F set; completion is signalled by the F flag clearing.
 * Returns the number of bytes written, or a negative errno.
 */
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
			     const void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	/* First access: determine the real VPD size. */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Make sure any previous operation has finished. */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		/* Assemble the next little-endian dword from the buffer. */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		/* Commit: write address with F set; done when F clears. */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = 1;
		vpd->flag = 0;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
  454. static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
  455. {
  456. struct pci_vpd *vpd = dev->vpd;
  457. if (len == 0 || len > PCI_VPD_MAX_SIZE)
  458. return -EIO;
  459. vpd->valid = 1;
  460. vpd->len = len;
  461. return 0;
  462. }
/* Default VPD ops: direct access through this function's own VPD capability. */
static const struct pci_vpd_ops pci_vpd_ops = {
	.read = pci_vpd_read,
	.write = pci_vpd_write,
	.set_size = pci_vpd_set_size,
};
  468. static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
  469. void *arg)
  470. {
  471. struct pci_dev *tdev = pci_get_slot(dev->bus,
  472. PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
  473. ssize_t ret;
  474. if (!tdev)
  475. return -ENODEV;
  476. ret = pci_read_vpd(tdev, pos, count, arg);
  477. pci_dev_put(tdev);
  478. return ret;
  479. }
  480. static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
  481. const void *arg)
  482. {
  483. struct pci_dev *tdev = pci_get_slot(dev->bus,
  484. PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
  485. ssize_t ret;
  486. if (!tdev)
  487. return -ENODEV;
  488. ret = pci_write_vpd(tdev, pos, count, arg);
  489. pci_dev_put(tdev);
  490. return ret;
  491. }
  492. static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
  493. {
  494. struct pci_dev *tdev = pci_get_slot(dev->bus,
  495. PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
  496. int ret;
  497. if (!tdev)
  498. return -ENODEV;
  499. ret = pci_set_vpd_size(tdev, len);
  500. pci_dev_put(tdev);
  501. return ret;
  502. }
/* VPD ops that redirect all access to function 0 of the slot. */
static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
	.set_size = pci_vpd_f0_set_size,
};
  508. int pci_vpd_init(struct pci_dev *dev)
  509. {
  510. struct pci_vpd *vpd;
  511. u8 cap;
  512. cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
  513. if (!cap)
  514. return -ENODEV;
  515. vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
  516. if (!vpd)
  517. return -ENOMEM;
  518. vpd->len = PCI_VPD_MAX_SIZE;
  519. if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
  520. vpd->ops = &pci_vpd_f0_ops;
  521. else
  522. vpd->ops = &pci_vpd_ops;
  523. mutex_init(&vpd->lock);
  524. vpd->cap = cap;
  525. vpd->busy = 0;
  526. vpd->valid = 0;
  527. dev->vpd = vpd;
  528. return 0;
  529. }
/* Free the VPD state allocated by pci_vpd_init() (kfree(NULL) is a no-op). */
void pci_vpd_release(struct pci_dev *dev)
{
	kfree(dev->vpd);
}
/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 *
 * May sleep (in pci_wait_cfg()) if another caller already holds the
 * block; use pci_cfg_access_trylock() from atomic context.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	/* Another holder: sleep until it releases (lock is reacquired inside). */
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
  552. /**
  553. * pci_cfg_access_trylock - try to lock PCI config reads/writes
  554. * @dev: pci device struct
  555. *
  556. * Same as pci_cfg_access_lock, but will return 0 if access is
  557. * already locked, 1 otherwise. This function can be used from
  558. * atomic contexts.
  559. */
  560. bool pci_cfg_access_trylock(struct pci_dev *dev)
  561. {
  562. unsigned long flags;
  563. bool locked = true;
  564. raw_spin_lock_irqsave(&pci_lock, flags);
  565. if (dev->block_cfg_access)
  566. locked = false;
  567. else
  568. dev->block_cfg_access = 1;
  569. raw_spin_unlock_irqrestore(&pci_lock, flags);
  570. return locked;
  571. }
  572. EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
  573. /**
  574. * pci_cfg_access_unlock - Unlock PCI config reads/writes
  575. * @dev: pci device struct
  576. *
  577. * This function allows PCI config accesses to resume.
  578. */
  579. void pci_cfg_access_unlock(struct pci_dev *dev)
  580. {
  581. unsigned long flags;
  582. raw_spin_lock_irqsave(&pci_lock, flags);
  583. /* This indicates a problem in the caller, but we don't need
  584. * to kill them, unlike a double-block above. */
  585. WARN_ON(!dev->block_cfg_access);
  586. dev->block_cfg_access = 0;
  587. raw_spin_unlock_irqrestore(&pci_lock, flags);
  588. wake_up_all(&pci_cfg_wait);
  589. }
  590. EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
  591. static inline int pcie_cap_version(const struct pci_dev *dev)
  592. {
  593. return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
  594. }
  595. static bool pcie_downstream_port(const struct pci_dev *dev)
  596. {
  597. int type = pci_pcie_type(dev);
  598. return type == PCI_EXP_TYPE_ROOT_PORT ||
  599. type == PCI_EXP_TYPE_DOWNSTREAM ||
  600. type == PCI_EXP_TYPE_PCIE_BRIDGE;
  601. }
  602. bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
  603. {
  604. int type = pci_pcie_type(dev);
  605. return type == PCI_EXP_TYPE_ENDPOINT ||
  606. type == PCI_EXP_TYPE_LEG_END ||
  607. type == PCI_EXP_TYPE_ROOT_PORT ||
  608. type == PCI_EXP_TYPE_UPSTREAM ||
  609. type == PCI_EXP_TYPE_DOWNSTREAM ||
  610. type == PCI_EXP_TYPE_PCI_BRIDGE ||
  611. type == PCI_EXP_TYPE_PCIE_BRIDGE;
  612. }
  613. static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
  614. {
  615. return pcie_downstream_port(dev) &&
  616. pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
  617. }
  618. static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
  619. {
  620. int type = pci_pcie_type(dev);
  621. return type == PCI_EXP_TYPE_ROOT_PORT ||
  622. type == PCI_EXP_TYPE_RC_EC;
  623. }
/*
 * Whether @dev implements the PCIe Capability register at offset @pos.
 * Non-PCIe devices implement none; link/slot/root register groups depend
 * on the port type, and the "2" register set requires capability
 * version > 1.
 */
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		/* Device registers are present in every PCIe function. */
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		/* The "2" register set only exists in capability v2+. */
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}
/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */

/*
 * pcie_capability_read_word - read a 16-bit PCIe Capability register.
 *
 * @pos must be 2-byte aligned.  Unimplemented registers read as 0
 * (success), except SLTSTA on downstream ports, which reads as
 * PCI_EXP_SLTSTA_PDS per the hardwired-value rule in the spec.
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
/*
 * pcie_capability_read_dword - read a 32-bit PCIe Capability register.
 *
 * @pos must be 4-byte aligned.  Same hardwired-value handling as
 * pcie_capability_read_word(): unimplemented registers read as 0,
 * except SLTSTA on downstream ports, which reads as PCI_EXP_SLTSTA_PDS.
 */
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/* See pcie_capability_read_word() for the SLTSTA special case. */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);
  715. int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
  716. {
  717. if (pos & 1)
  718. return -EINVAL;
  719. if (!pcie_capability_reg_implemented(dev, pos))
  720. return 0;
  721. return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
  722. }
  723. EXPORT_SYMBOL(pcie_capability_write_word);
  724. int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
  725. {
  726. if (pos & 3)
  727. return -EINVAL;
  728. if (!pcie_capability_reg_implemented(dev, pos))
  729. return 0;
  730. return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
  731. }
  732. EXPORT_SYMBOL(pcie_capability_write_dword);
  733. int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
  734. u16 clear, u16 set)
  735. {
  736. int ret;
  737. u16 val;
  738. ret = pcie_capability_read_word(dev, pos, &val);
  739. if (!ret) {
  740. val &= ~clear;
  741. val |= set;
  742. ret = pcie_capability_write_word(dev, pos, val);
  743. }
  744. return ret;
  745. }
  746. EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
  747. int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
  748. u32 clear, u32 set)
  749. {
  750. int ret;
  751. u32 val;
  752. ret = pcie_capability_read_dword(dev, pos, &val);
  753. if (!ret) {
  754. val &= ~clear;
  755. val |= set;
  756. ret = pcie_capability_write_dword(dev, pos, val);
  757. }
  758. return ret;
  759. }
  760. EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
  761. int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
  762. {
  763. if (pci_dev_is_disconnected(dev)) {
  764. *val = ~0;
  765. return PCIBIOS_DEVICE_NOT_FOUND;
  766. }
  767. return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
  768. }
  769. EXPORT_SYMBOL(pci_read_config_byte);
  770. int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
  771. {
  772. if (pci_dev_is_disconnected(dev)) {
  773. *val = ~0;
  774. return PCIBIOS_DEVICE_NOT_FOUND;
  775. }
  776. return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
  777. }
  778. EXPORT_SYMBOL(pci_read_config_word);
  779. int pci_read_config_dword(const struct pci_dev *dev, int where,
  780. u32 *val)
  781. {
  782. if (pci_dev_is_disconnected(dev)) {
  783. *val = ~0;
  784. return PCIBIOS_DEVICE_NOT_FOUND;
  785. }
  786. return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
  787. }
  788. EXPORT_SYMBOL(pci_read_config_dword);
  789. int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
  790. {
  791. if (pci_dev_is_disconnected(dev))
  792. return PCIBIOS_DEVICE_NOT_FOUND;
  793. return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
  794. }
  795. EXPORT_SYMBOL(pci_write_config_byte);
  796. int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
  797. {
  798. if (pci_dev_is_disconnected(dev))
  799. return PCIBIOS_DEVICE_NOT_FOUND;
  800. return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
  801. }
  802. EXPORT_SYMBOL(pci_write_config_word);
  803. int pci_write_config_dword(const struct pci_dev *dev, int where,
  804. u32 val)
  805. {
  806. if (pci_dev_is_disconnected(dev))
  807. return PCIBIOS_DEVICE_NOT_FOUND;
  808. return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
  809. }
  810. EXPORT_SYMBOL(pci_write_config_dword);