/*
 * PCI configuration space, VPD (Vital Product Data), and PCI Express
 * capability accessors.
 */
  1. #include <linux/delay.h>
  2. #include <linux/pci.h>
  3. #include <linux/module.h>
  4. #include <linux/sched/signal.h>
  5. #include <linux/slab.h>
  6. #include <linux/ioport.h>
  7. #include <linux/wait.h>
  8. #include "pci.h"
/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */

/* Alignment checks: expand against the 'pos' argument of the caller */
#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

/*
 * With CONFIG_PCI_LOCKLESS_CONFIG the host bridge serializes config
 * accesses itself, so the global pci_lock is elided; '(void)(f)' keeps
 * the flags variable "used" to avoid compiler warnings.
 */
#ifdef CONFIG_PCI_LOCKLESS_CONFIG
# define pci_lock_config(f)	do { (void)(f); } while (0)
# define pci_unlock_config(f)	do { (void)(f); } while (0)
#else
# define pci_lock_config(f)	raw_spin_lock_irqsave(&pci_lock, f)
# define pci_unlock_config(f)	raw_spin_unlock_irqrestore(&pci_lock, f)
#endif
/*
 * PCI_OP_READ - generate pci_bus_read_config_{byte,word,dword}().
 *
 * Each generated function rejects misaligned offsets
 * (PCI_##size##_BAD expands against 'pos'), takes pci_lock (unless
 * lockless config is enabled) and forwards to bus->ops->read().
 * On failure *value is the pre-initialized 0.  Returns a PCIBIOS_*
 * status code from the low-level op.
 */
#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	pci_lock_config(flags);						\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	pci_unlock_config(flags);					\
	return res;							\
}
/*
 * PCI_OP_WRITE - generate pci_bus_write_config_{byte,word,dword}().
 *
 * Mirror of PCI_OP_READ: alignment check, global config lock, then
 * bus->ops->write().  Returns a PCIBIOS_* status code.
 */
#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	pci_lock_config(flags);						\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	pci_unlock_config(flags);					\
	return res;							\
}
/* Instantiate the byte/word/dword bus config accessors and export them */
PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
  67. int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
  68. int where, int size, u32 *val)
  69. {
  70. void __iomem *addr;
  71. addr = bus->ops->map_bus(bus, devfn, where);
  72. if (!addr) {
  73. *val = ~0;
  74. return PCIBIOS_DEVICE_NOT_FOUND;
  75. }
  76. if (size == 1)
  77. *val = readb(addr);
  78. else if (size == 2)
  79. *val = readw(addr);
  80. else
  81. *val = readl(addr);
  82. return PCIBIOS_SUCCESSFUL;
  83. }
  84. EXPORT_SYMBOL_GPL(pci_generic_config_read);
  85. int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
  86. int where, int size, u32 val)
  87. {
  88. void __iomem *addr;
  89. addr = bus->ops->map_bus(bus, devfn, where);
  90. if (!addr)
  91. return PCIBIOS_DEVICE_NOT_FOUND;
  92. if (size == 1)
  93. writeb(val, addr);
  94. else if (size == 2)
  95. writew(val, addr);
  96. else
  97. writel(val, addr);
  98. return PCIBIOS_SUCCESSFUL;
  99. }
  100. EXPORT_SYMBOL_GPL(pci_generic_config_write);
  101. int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
  102. int where, int size, u32 *val)
  103. {
  104. void __iomem *addr;
  105. addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
  106. if (!addr) {
  107. *val = ~0;
  108. return PCIBIOS_DEVICE_NOT_FOUND;
  109. }
  110. *val = readl(addr);
  111. if (size <= 2)
  112. *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
  113. return PCIBIOS_SUCCESSFUL;
  114. }
  115. EXPORT_SYMBOL_GPL(pci_generic_config_read32);
/*
 * pci_generic_config_write32 - config write for hardware limited to
 * 32-bit accesses
 *
 * 4-byte writes go straight through.  Sub-word writes are emulated with
 * a read-modify-write of the containing aligned dword, which can clobber
 * RW1C bits in the untouched bytes -- hence the rate-limited warning.
 */
int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	/*
	 * In general, hardware that supports only 32-bit writes on PCI is
	 * not spec-compliant.  For example, software may perform a 16-bit
	 * write.  If the hardware only supports 32-bit accesses, we must
	 * do a 32-bit read, merge in the 16 bits we intend to write,
	 * followed by a 32-bit write.  If the 16 bits we *don't* intend to
	 * write happen to have any RW1C (write-one-to-clear) bits set, we
	 * just inadvertently cleared something we shouldn't have.
	 */
	dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
			     size, pci_domain_nr(bus), bus->number,
			     PCI_SLOT(devfn), PCI_FUNC(devfn), where);

	/* Mask covers the bytes we are NOT writing; keep those from HW */
	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);
  147. /**
  148. * pci_bus_set_ops - Set raw operations of pci bus
  149. * @bus: pci bus struct
  150. * @ops: new raw operations
  151. *
  152. * Return previous raw operations
  153. */
  154. struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
  155. {
  156. struct pci_ops *old_ops;
  157. unsigned long flags;
  158. raw_spin_lock_irqsave(&pci_lock, flags);
  159. old_ops = bus->ops;
  160. bus->ops = ops;
  161. raw_spin_unlock_irqrestore(&pci_lock, flags);
  162. return old_ops;
  163. }
  164. EXPORT_SYMBOL(pci_bus_set_ops);
/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so. Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

/*
 * Sleep until dev->block_cfg_access clears.
 *
 * Must be called with pci_lock held (acquired with raw_spin_lock_irq).
 * The lock is dropped around schedule() and re-acquired before each
 * re-check, so the caller still holds pci_lock on return.
 */
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}
/* Returns 0 on success, negative values indicate error. */
/*
 * PCI_USER_READ_CONFIG - generate pci_user_read_config_{byte,word,dword}().
 *
 * Like the bus accessors but honors per-device config blocking: if
 * block_cfg_access is set the caller sleeps in pci_wait_cfg(), so these
 * must only be used from sleepable context.  PCIBIOS errors are mapped
 * to negative errno values.
 */
#define PCI_USER_READ_CONFIG(size, type) \
int pci_user_read_config_##size \
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
				  pos, sizeof(type), &data);		\
	raw_spin_unlock_irq(&pci_lock);					\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
/* Returns 0 on success, negative values indicate error. */
/*
 * PCI_USER_WRITE_CONFIG - generate pci_user_write_config_{byte,word,dword}().
 * Write-side twin of PCI_USER_READ_CONFIG; may sleep in pci_wait_cfg().
 */
#define PCI_USER_WRITE_CONFIG(size, type) \
int pci_user_write_config_##size \
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
				   pos, sizeof(type), val);		\
	raw_spin_unlock_irq(&pci_lock);					\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

/* Instantiate user-level config accessors for all three widths */
PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
  228. /* VPD access through PCI 2.2+ VPD capability */
  229. /**
  230. * pci_read_vpd - Read one entry from Vital Product Data
  231. * @dev: pci device struct
  232. * @pos: offset in vpd space
  233. * @count: number of bytes to read
  234. * @buf: pointer to where to store result
  235. */
  236. ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
  237. {
  238. if (!dev->vpd || !dev->vpd->ops)
  239. return -ENODEV;
  240. return dev->vpd->ops->read(dev, pos, count, buf);
  241. }
  242. EXPORT_SYMBOL(pci_read_vpd);
  243. /**
  244. * pci_write_vpd - Write entry to Vital Product Data
  245. * @dev: pci device struct
  246. * @pos: offset in vpd space
  247. * @count: number of bytes to write
  248. * @buf: buffer containing write data
  249. */
  250. ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
  251. {
  252. if (!dev->vpd || !dev->vpd->ops)
  253. return -ENODEV;
  254. return dev->vpd->ops->write(dev, pos, count, buf);
  255. }
  256. EXPORT_SYMBOL(pci_write_vpd);
  257. /**
  258. * pci_set_vpd_size - Set size of Vital Product Data space
  259. * @dev: pci device struct
  260. * @len: size of vpd space
  261. */
  262. int pci_set_vpd_size(struct pci_dev *dev, size_t len)
  263. {
  264. if (!dev->vpd || !dev->vpd->ops)
  265. return -ENODEV;
  266. return dev->vpd->ops->set_size(dev, len);
  267. }
  268. EXPORT_SYMBOL(pci_set_vpd_size);
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev: pci device struct
 * @old_size: current assumed size, also maximum allowed size
 *
 * Walks the VPD resource tag list from offset 0 up to @old_size.
 * Returns the offset just past the End tag on success, or 0 when the
 * data is malformed (unknown tag, short read, or no End tag found).
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
	size_t off = 0;
	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */

	while (off < old_size &&
	       pci_read_vpd(dev, off, 1, header) == 1) {
		unsigned char tag;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			tag = pci_vpd_lrdt_tag(header);
			/* Only read length from known tag items */
			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
			    (tag == PCI_VPD_LTIN_RO_DATA) ||
			    (tag == PCI_VPD_LTIN_RW_DATA)) {
				if (pci_read_vpd(dev, off+1, 2,
						 &header[1]) != 2) {
					dev_warn(&dev->dev,
						 "invalid large VPD tag %02x size at offset %zu",
						 tag, off + 1);
					return 0;
				}
				/* Skip tag byte, 2 length bytes, payload */
				off += PCI_VPD_LRDT_TAG_SIZE +
					pci_vpd_lrdt_size(header);
			}
		} else {
			/* Short Resource Data Type Tag */
			off += PCI_VPD_SRDT_TAG_SIZE +
				pci_vpd_srdt_size(header);
			tag = pci_vpd_srdt_tag(header);
		}

		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
			return off;

		/* Anything but ID string / RO / RW data is invalid here */
		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
		    (tag != PCI_VPD_LTIN_RO_DATA) &&
		    (tag != PCI_VPD_LTIN_RW_DATA)) {
			dev_warn(&dev->dev,
				 "invalid %s VPD tag %02x at offset %zu",
				 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
				 tag, off);
			return 0;
		}
	}
	return 0;
}
  319. /*
  320. * Wait for last operation to complete.
  321. * This code has to spin since there is no other notification from the PCI
  322. * hardware. Since the VPD is often implemented by serial attachment to an
  323. * EEPROM, it may take many milliseconds to complete.
  324. *
  325. * Returns 0 on success, negative values indicate error.
  326. */
  327. static int pci_vpd_wait(struct pci_dev *dev)
  328. {
  329. struct pci_vpd *vpd = dev->vpd;
  330. unsigned long timeout = jiffies + msecs_to_jiffies(125);
  331. unsigned long max_sleep = 16;
  332. u16 status;
  333. int ret;
  334. if (!vpd->busy)
  335. return 0;
  336. while (time_before(jiffies, timeout)) {
  337. ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
  338. &status);
  339. if (ret < 0)
  340. return ret;
  341. if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
  342. vpd->busy = 0;
  343. return 0;
  344. }
  345. if (fatal_signal_pending(current))
  346. return -EINTR;
  347. usleep_range(10, max_sleep);
  348. if (max_sleep < 1024)
  349. max_sleep *= 2;
  350. }
  351. dev_warn(&dev->dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
  352. return -ETIMEDOUT;
  353. }
/*
 * pci_vpd_read - read VPD through the capability's 4-byte data window
 *
 * Validates/clamps the requested range against the (lazily-sized) VPD
 * length, then reads dword-aligned windows: write the address with the
 * F bit clear, wait for it to be set, read PCI_VPD_DATA, and copy out
 * the bytes that fall inside [pos, end).  Returns bytes read or a
 * negative errno; a mid-stream error discards the partial count.
 */
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
			    void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0)
		return -EINVAL;

	/* First access: probe the device for the real VPD size */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}
	if (vpd->len == 0)
		return -EIO;

	if (pos > vpd->len)
		return 0;

	/* Clamp the read to the end of VPD space */
	if (end > vpd->len) {
		end = vpd->len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Make sure any prior operation has drained */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		/* Select the aligned dword; F bit clear requests a read */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = 1;
		vpd->flag = PCI_VPD_ADDR_F;	/* done when F becomes 1 */
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		/* Copy out bytes of the dword inside [pos, end) */
		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;	/* low byte of val */
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
/*
 * pci_vpd_write - write VPD through the capability's 4-byte data window
 *
 * Both @pos and @count must be dword-aligned; partial-dword writes are
 * rejected rather than emulated.  For each dword: write PCI_VPD_DATA,
 * then write the address with the F bit SET, and wait for the hardware
 * to clear it.  Returns bytes written or a negative errno.
 */
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
			     const void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	/* First access: probe the device for the real VPD size */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}
	if (vpd->len == 0)
		return -EIO;

	/* Unlike reads, out-of-range writes are an error, not clamped */
	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Make sure any prior operation has drained */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		/* Assemble little-endian dword from the source buffer */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		/* F bit set requests a write; done when F reads back 0 */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = 1;
		vpd->flag = 0;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
  455. static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
  456. {
  457. struct pci_vpd *vpd = dev->vpd;
  458. if (len == 0 || len > PCI_VPD_MAX_SIZE)
  459. return -EIO;
  460. vpd->valid = 1;
  461. vpd->len = len;
  462. return 0;
  463. }
/* Default VPD backend: direct access through the device's own capability */
static const struct pci_vpd_ops pci_vpd_ops = {
	.read = pci_vpd_read,
	.write = pci_vpd_write,
	.set_size = pci_vpd_set_size,
};
  469. static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
  470. void *arg)
  471. {
  472. struct pci_dev *tdev = pci_get_slot(dev->bus,
  473. PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
  474. ssize_t ret;
  475. if (!tdev)
  476. return -ENODEV;
  477. ret = pci_read_vpd(tdev, pos, count, arg);
  478. pci_dev_put(tdev);
  479. return ret;
  480. }
  481. static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
  482. const void *arg)
  483. {
  484. struct pci_dev *tdev = pci_get_slot(dev->bus,
  485. PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
  486. ssize_t ret;
  487. if (!tdev)
  488. return -ENODEV;
  489. ret = pci_write_vpd(tdev, pos, count, arg);
  490. pci_dev_put(tdev);
  491. return ret;
  492. }
  493. static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
  494. {
  495. struct pci_dev *tdev = pci_get_slot(dev->bus,
  496. PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
  497. int ret;
  498. if (!tdev)
  499. return -ENODEV;
  500. ret = pci_set_vpd_size(tdev, len);
  501. pci_dev_put(tdev);
  502. return ret;
  503. }
/* VPD backend for devices whose VPD lives on function 0 (quirk) */
static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
	.set_size = pci_vpd_f0_set_size,
};
  509. int pci_vpd_init(struct pci_dev *dev)
  510. {
  511. struct pci_vpd *vpd;
  512. u8 cap;
  513. cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
  514. if (!cap)
  515. return -ENODEV;
  516. vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
  517. if (!vpd)
  518. return -ENOMEM;
  519. vpd->len = PCI_VPD_MAX_SIZE;
  520. if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
  521. vpd->ops = &pci_vpd_f0_ops;
  522. else
  523. vpd->ops = &pci_vpd_ops;
  524. mutex_init(&vpd->lock);
  525. vpd->cap = cap;
  526. vpd->busy = 0;
  527. vpd->valid = 0;
  528. dev->vpd = vpd;
  529. return 0;
  530. }
/* Free the VPD state allocated by pci_vpd_init() (kfree(NULL) is a no-op) */
void pci_vpd_release(struct pci_dev *dev)
{
	kfree(dev->vpd);
}
/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 *
 * May sleep (waits in pci_wait_cfg() if another holder has already
 * blocked this device), so only callable from process context.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	/* Queue behind an existing holder; pci_wait_cfg keeps pci_lock held */
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock, but will return false if access is
 * already locked, true otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;		/* someone else holds the block */
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/* This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above. */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	/* Wake waiters after dropping pci_lock; they re-take it themselves */
	wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
  592. static inline int pcie_cap_version(const struct pci_dev *dev)
  593. {
  594. return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
  595. }
  596. static bool pcie_downstream_port(const struct pci_dev *dev)
  597. {
  598. int type = pci_pcie_type(dev);
  599. return type == PCI_EXP_TYPE_ROOT_PORT ||
  600. type == PCI_EXP_TYPE_DOWNSTREAM ||
  601. type == PCI_EXP_TYPE_PCIE_BRIDGE;
  602. }
  603. bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
  604. {
  605. int type = pci_pcie_type(dev);
  606. return type == PCI_EXP_TYPE_ENDPOINT ||
  607. type == PCI_EXP_TYPE_LEG_END ||
  608. type == PCI_EXP_TYPE_ROOT_PORT ||
  609. type == PCI_EXP_TYPE_UPSTREAM ||
  610. type == PCI_EXP_TYPE_DOWNSTREAM ||
  611. type == PCI_EXP_TYPE_PCI_BRIDGE ||
  612. type == PCI_EXP_TYPE_PCIE_BRIDGE;
  613. }
  614. static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
  615. {
  616. return pcie_downstream_port(dev) &&
  617. pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
  618. }
  619. static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
  620. {
  621. int type = pci_pcie_type(dev);
  622. return type == PCI_EXP_TYPE_ROOT_PORT ||
  623. type == PCI_EXP_TYPE_RC_EC;
  624. }
/*
 * Report whether the given PCIe capability register offset is actually
 * implemented for @dev, per device type and capability version.
 */
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	/* Device registers: present on every PCIe function */
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	/* Link registers: depend on device type */
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	/* Slot registers: downstream ports with a slot only */
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	/* Root registers: root ports and root complex event collectors */
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	/* v2-only registers */
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}
/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */

/*
 * Read a 16-bit register from the PCIe capability at offset @pos.
 * Unimplemented registers read as 0 (spec-mandated), except SLTSTA on
 * downstream ports which reads as PDS set.  Returns 0 or a PCIBIOS error.
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
/*
 * 32-bit variant of pcie_capability_read_word(); same unimplemented-
 * register emulation (zeros, or SLTSTA PDS on downstream ports).
 */
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/* Emulate hardwired PDS for unimplemented SLTSTA, as above */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);
  716. int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
  717. {
  718. if (pos & 1)
  719. return -EINVAL;
  720. if (!pcie_capability_reg_implemented(dev, pos))
  721. return 0;
  722. return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
  723. }
  724. EXPORT_SYMBOL(pcie_capability_write_word);
  725. int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
  726. {
  727. if (pos & 3)
  728. return -EINVAL;
  729. if (!pcie_capability_reg_implemented(dev, pos))
  730. return 0;
  731. return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
  732. }
  733. EXPORT_SYMBOL(pcie_capability_write_dword);
  734. int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
  735. u16 clear, u16 set)
  736. {
  737. int ret;
  738. u16 val;
  739. ret = pcie_capability_read_word(dev, pos, &val);
  740. if (!ret) {
  741. val &= ~clear;
  742. val |= set;
  743. ret = pcie_capability_write_word(dev, pos, val);
  744. }
  745. return ret;
  746. }
  747. EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
  748. int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
  749. u32 clear, u32 set)
  750. {
  751. int ret;
  752. u32 val;
  753. ret = pcie_capability_read_dword(dev, pos, &val);
  754. if (!ret) {
  755. val &= ~clear;
  756. val |= set;
  757. ret = pcie_capability_write_dword(dev, pos, val);
  758. }
  759. return ret;
  760. }
  761. EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
  762. int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
  763. {
  764. if (pci_dev_is_disconnected(dev)) {
  765. *val = ~0;
  766. return PCIBIOS_DEVICE_NOT_FOUND;
  767. }
  768. return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
  769. }
  770. EXPORT_SYMBOL(pci_read_config_byte);
  771. int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
  772. {
  773. if (pci_dev_is_disconnected(dev)) {
  774. *val = ~0;
  775. return PCIBIOS_DEVICE_NOT_FOUND;
  776. }
  777. return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
  778. }
  779. EXPORT_SYMBOL(pci_read_config_word);
  780. int pci_read_config_dword(const struct pci_dev *dev, int where,
  781. u32 *val)
  782. {
  783. if (pci_dev_is_disconnected(dev)) {
  784. *val = ~0;
  785. return PCIBIOS_DEVICE_NOT_FOUND;
  786. }
  787. return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
  788. }
  789. EXPORT_SYMBOL(pci_read_config_dword);
  790. int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
  791. {
  792. if (pci_dev_is_disconnected(dev))
  793. return PCIBIOS_DEVICE_NOT_FOUND;
  794. return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
  795. }
  796. EXPORT_SYMBOL(pci_write_config_byte);
  797. int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
  798. {
  799. if (pci_dev_is_disconnected(dev))
  800. return PCIBIOS_DEVICE_NOT_FOUND;
  801. return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
  802. }
  803. EXPORT_SYMBOL(pci_write_config_word);
  804. int pci_write_config_dword(const struct pci_dev *dev, int where,
  805. u32 val)
  806. {
  807. if (pci_dev_is_disconnected(dev))
  808. return PCIBIOS_DEVICE_NOT_FOUND;
  809. return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
  810. }
  811. EXPORT_SYMBOL(pci_write_config_dword);