vfio_pci_config.c

  1. /*
  2. * VFIO PCI config space virtualization
  3. *
  4. * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
  5. * Author: Alex Williamson <alex.williamson@redhat.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * Derived from original vfio:
  12. * Copyright 2010 Cisco Systems, Inc. All rights reserved.
  13. * Author: Tom Lyon, pugs@cisco.com
  14. */
  15. /*
  16. * This code handles reading and writing of PCI configuration registers.
  17. * This is hairy because we want to allow a lot of flexibility to the
  18. * user driver, but cannot trust it with all of the config fields.
  19. * Tables determine which fields can be read and written, as well as
  20. * which fields are 'virtualized' - special actions and translations to
  21. * make it appear to the user that he has control, when in fact things
  22. * must be negotiated with the underlying OS.
  23. */
  24. #include <linux/fs.h>
  25. #include <linux/pci.h>
  26. #include <linux/uaccess.h>
  27. #include <linux/vfio.h>
  28. #include <linux/slab.h>
  29. #include "vfio_pci_private.h"
  30. /* Fake capability ID for standard config space */
  31. #define PCI_CAP_ID_BASIC 0
  32. #define is_bar(offset) \
  33. ((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \
  34. (offset >= PCI_ROM_ADDRESS && offset < PCI_ROM_ADDRESS + 4))
  35. /*
  36. * Lengths of PCI Config Capabilities
  37. * 0: Removed from the user visible capability list
  38. * FF: Variable length
  39. */
  40. static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = {
  41. [PCI_CAP_ID_BASIC] = PCI_STD_HEADER_SIZEOF, /* pci config header */
  42. [PCI_CAP_ID_PM] = PCI_PM_SIZEOF,
  43. [PCI_CAP_ID_AGP] = PCI_AGP_SIZEOF,
  44. [PCI_CAP_ID_VPD] = PCI_CAP_VPD_SIZEOF,
  45. [PCI_CAP_ID_SLOTID] = 0, /* bridge - don't care */
  46. [PCI_CAP_ID_MSI] = 0xFF, /* 10, 14, 20, or 24 */
  47. [PCI_CAP_ID_CHSWP] = 0, /* cpci - not yet */
  48. [PCI_CAP_ID_PCIX] = 0xFF, /* 8 or 24 */
  49. [PCI_CAP_ID_HT] = 0xFF, /* hypertransport */
  50. [PCI_CAP_ID_VNDR] = 0xFF, /* variable */
  51. [PCI_CAP_ID_DBG] = 0, /* debug - don't care */
  52. [PCI_CAP_ID_CCRC] = 0, /* cpci - not yet */
  53. [PCI_CAP_ID_SHPC] = 0, /* hotswap - not yet */
  54. [PCI_CAP_ID_SSVID] = 0, /* bridge - don't care */
  55. [PCI_CAP_ID_AGP3] = 0, /* AGP8x - not yet */
  56. [PCI_CAP_ID_SECDEV] = 0, /* secure device not yet */
  57. [PCI_CAP_ID_EXP] = 0xFF, /* 20 or 44 */
  58. [PCI_CAP_ID_MSIX] = PCI_CAP_MSIX_SIZEOF,
  59. [PCI_CAP_ID_SATA] = 0xFF,
  60. [PCI_CAP_ID_AF] = PCI_CAP_AF_SIZEOF,
  61. };
  62. /*
  63. * Lengths of PCIe/PCI-X Extended Config Capabilities
  64. * 0: Removed or masked from the user visible capability list
  65. * FF: Variable length
  66. */
  67. static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
  68. [PCI_EXT_CAP_ID_ERR] = PCI_ERR_ROOT_COMMAND,
  69. [PCI_EXT_CAP_ID_VC] = 0xFF,
  70. [PCI_EXT_CAP_ID_DSN] = PCI_EXT_CAP_DSN_SIZEOF,
  71. [PCI_EXT_CAP_ID_PWR] = PCI_EXT_CAP_PWR_SIZEOF,
  72. [PCI_EXT_CAP_ID_RCLD] = 0, /* root only - don't care */
  73. [PCI_EXT_CAP_ID_RCILC] = 0, /* root only - don't care */
  74. [PCI_EXT_CAP_ID_RCEC] = 0, /* root only - don't care */
  75. [PCI_EXT_CAP_ID_MFVC] = 0xFF,
  76. [PCI_EXT_CAP_ID_VC9] = 0xFF, /* same as CAP_ID_VC */
  77. [PCI_EXT_CAP_ID_RCRB] = 0, /* root only - don't care */
  78. [PCI_EXT_CAP_ID_VNDR] = 0xFF,
  79. [PCI_EXT_CAP_ID_CAC] = 0, /* obsolete */
  80. [PCI_EXT_CAP_ID_ACS] = 0xFF,
  81. [PCI_EXT_CAP_ID_ARI] = PCI_EXT_CAP_ARI_SIZEOF,
  82. [PCI_EXT_CAP_ID_ATS] = PCI_EXT_CAP_ATS_SIZEOF,
  83. [PCI_EXT_CAP_ID_SRIOV] = PCI_EXT_CAP_SRIOV_SIZEOF,
  84. [PCI_EXT_CAP_ID_MRIOV] = 0, /* not yet */
  85. [PCI_EXT_CAP_ID_MCAST] = PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF,
  86. [PCI_EXT_CAP_ID_PRI] = PCI_EXT_CAP_PRI_SIZEOF,
  87. [PCI_EXT_CAP_ID_AMD_XXX] = 0, /* not yet */
  88. [PCI_EXT_CAP_ID_REBAR] = 0xFF,
  89. [PCI_EXT_CAP_ID_DPA] = 0xFF,
  90. [PCI_EXT_CAP_ID_TPH] = 0xFF,
  91. [PCI_EXT_CAP_ID_LTR] = PCI_EXT_CAP_LTR_SIZEOF,
  92. [PCI_EXT_CAP_ID_SECPCI] = 0, /* not yet */
  93. [PCI_EXT_CAP_ID_PMUX] = 0, /* not yet */
  94. [PCI_EXT_CAP_ID_PASID] = 0, /* not yet */
  95. };
  96. /*
  97. * Read/Write Permission Bits - one bit for each bit in capability
  98. * Any field can be read if it exists, but what is read depends on
  99. * whether the field is 'virtualized', or just pass thru to the
  100. * hardware. Any virtualized field is also virtualized for writes.
  101. * Writes are only permitted if they have a 1 bit here.
  102. */
  103. struct perm_bits {
  104. u8 *virt; /* read/write virtual data, not hw */
  105. u8 *write; /* writeable bits */
  106. int (*readfn)(struct vfio_pci_device *vdev, int pos, int count,
  107. struct perm_bits *perm, int offset, __le32 *val);
  108. int (*writefn)(struct vfio_pci_device *vdev, int pos, int count,
  109. struct perm_bits *perm, int offset, __le32 val);
  110. };
  111. #define NO_VIRT 0
  112. #define ALL_VIRT 0xFFFFFFFFU
  113. #define NO_WRITE 0
  114. #define ALL_WRITE 0xFFFFFFFFU
  115. static int vfio_user_config_read(struct pci_dev *pdev, int offset,
  116. __le32 *val, int count)
  117. {
  118. int ret = -EINVAL;
  119. u32 tmp_val = 0;
  120. switch (count) {
  121. case 1:
  122. {
  123. u8 tmp;
  124. ret = pci_user_read_config_byte(pdev, offset, &tmp);
  125. tmp_val = tmp;
  126. break;
  127. }
  128. case 2:
  129. {
  130. u16 tmp;
  131. ret = pci_user_read_config_word(pdev, offset, &tmp);
  132. tmp_val = tmp;
  133. break;
  134. }
  135. case 4:
  136. ret = pci_user_read_config_dword(pdev, offset, &tmp_val);
  137. break;
  138. }
  139. *val = cpu_to_le32(tmp_val);
  140. return ret;
  141. }
  142. static int vfio_user_config_write(struct pci_dev *pdev, int offset,
  143. __le32 val, int count)
  144. {
  145. int ret = -EINVAL;
  146. u32 tmp_val = le32_to_cpu(val);
  147. switch (count) {
  148. case 1:
  149. ret = pci_user_write_config_byte(pdev, offset, tmp_val);
  150. break;
  151. case 2:
  152. ret = pci_user_write_config_word(pdev, offset, tmp_val);
  153. break;
  154. case 4:
  155. ret = pci_user_write_config_dword(pdev, offset, tmp_val);
  156. break;
  157. }
  158. return ret;
  159. }
  160. static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos,
  161. int count, struct perm_bits *perm,
  162. int offset, __le32 *val)
  163. {
  164. __le32 virt = 0;
  165. memcpy(val, vdev->vconfig + pos, count);
  166. memcpy(&virt, perm->virt + offset, count);
  167. /* Any non-virtualized bits? */
  168. if (cpu_to_le32(~0U >> (32 - (count * 8))) != virt) {
  169. struct pci_dev *pdev = vdev->pdev;
  170. __le32 phys_val = 0;
  171. int ret;
  172. ret = vfio_user_config_read(pdev, pos, &phys_val, count);
  173. if (ret)
  174. return ret;
  175. *val = (phys_val & ~virt) | (*val & virt);
  176. }
  177. return count;
  178. }
  179. static int vfio_default_config_write(struct vfio_pci_device *vdev, int pos,
  180. int count, struct perm_bits *perm,
  181. int offset, __le32 val)
  182. {
  183. __le32 virt = 0, write = 0;
  184. memcpy(&write, perm->write + offset, count);
  185. if (!write)
  186. return count; /* drop, no writable bits */
  187. memcpy(&virt, perm->virt + offset, count);
  188. /* Virtualized and writable bits go to vconfig */
  189. if (write & virt) {
  190. __le32 virt_val = 0;
  191. memcpy(&virt_val, vdev->vconfig + pos, count);
  192. virt_val &= ~(write & virt);
  193. virt_val |= (val & (write & virt));
  194. memcpy(vdev->vconfig + pos, &virt_val, count);
  195. }
  196. /* Non-virtualized and writable bits go to hardware */
  197. if (write & ~virt) {
  198. struct pci_dev *pdev = vdev->pdev;
  199. __le32 phys_val = 0;
  200. int ret;
  201. ret = vfio_user_config_read(pdev, pos, &phys_val, count);
  202. if (ret)
  203. return ret;
  204. phys_val &= ~(write & ~virt);
  205. phys_val |= (val & (write & ~virt));
  206. ret = vfio_user_config_write(pdev, pos, phys_val, count);
  207. if (ret)
  208. return ret;
  209. }
  210. return count;
  211. }
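To make the routing above concrete, here is a minimal stand-alone sketch of the same mask arithmetic, using values that mirror what init_pci_cap_basic_perm() later installs for PCI_COMMAND (only INTx Disable virtualized, the whole word writable); the constants are illustrative, not taken from this file:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t virt  = 0x0400;  /* PCI_COMMAND_INTX_DISABLE is the only virtualized bit */
	uint32_t write = 0xFFFF;  /* every bit of the 16-bit command word is writable */
	uint32_t val   = 0x0407;  /* user writes IO | MEMORY | MASTER | INTX_DISABLE */

	uint32_t to_vconfig = val & (write & virt);   /* 0x0400: kept in the emulated copy */
	uint32_t hw_mask    = write & ~virt;          /* 0xFBFF: read-modify-write on hardware */

	printf("vconfig gets 0x%04x, hardware RMW mask 0x%04x, hardware gets 0x%04x\n",
	       to_vconfig, hw_mask, val & hw_mask);
	return 0;
}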
  212. /* Allow direct read from hardware, except for capability next pointer */
  213. static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos,
  214. int count, struct perm_bits *perm,
  215. int offset, __le32 *val)
  216. {
  217. int ret;
  218. ret = vfio_user_config_read(vdev->pdev, pos, val, count);
  219. if (ret)
  220. return ret;
  221. if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */
  222. if (offset < 4)
  223. memcpy(val, vdev->vconfig + pos, count);
  224. } else if (pos >= PCI_STD_HEADER_SIZEOF) { /* Std cap mangling */
  225. if (offset == PCI_CAP_LIST_ID && count > 1)
  226. memcpy(val, vdev->vconfig + pos,
  227. min(PCI_CAP_FLAGS, count));
  228. else if (offset == PCI_CAP_LIST_NEXT)
  229. memcpy(val, vdev->vconfig + pos, 1);
  230. }
  231. return count;
  232. }
  233. /* Raw access skips any kind of virtualization */
  234. static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos,
  235. int count, struct perm_bits *perm,
  236. int offset, __le32 val)
  237. {
  238. int ret;
  239. ret = vfio_user_config_write(vdev->pdev, pos, val, count);
  240. if (ret)
  241. return ret;
  242. return count;
  243. }
  244. static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos,
  245. int count, struct perm_bits *perm,
  246. int offset, __le32 *val)
  247. {
  248. int ret;
  249. ret = vfio_user_config_read(vdev->pdev, pos, val, count);
  250. if (ret)
  251. return ret;
  252. return count;
  253. }
  254. /* Virt access uses only virtualization */
  255. static int vfio_virt_config_write(struct vfio_pci_device *vdev, int pos,
  256. int count, struct perm_bits *perm,
  257. int offset, __le32 val)
  258. {
  259. memcpy(vdev->vconfig + pos, &val, count);
  260. return count;
  261. }
  262. static int vfio_virt_config_read(struct vfio_pci_device *vdev, int pos,
  263. int count, struct perm_bits *perm,
  264. int offset, __le32 *val)
  265. {
  266. memcpy(val, vdev->vconfig + pos, count);
  267. return count;
  268. }
  269. /* Default capability regions to read-only, no-virtualization */
  270. static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
  271. [0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
  272. };
  273. static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = {
  274. [0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
  275. };
  276. /*
  277. * Default unassigned regions to raw read-write access. Some devices
  278. * require this to function as they hide registers between the gaps in
  279. * config space (be2net). Like MMIO and I/O port registers, we have
  280. * to trust the hardware isolation.
  281. */
  282. static struct perm_bits unassigned_perms = {
  283. .readfn = vfio_raw_config_read,
  284. .writefn = vfio_raw_config_write
  285. };
  286. static struct perm_bits virt_perms = {
  287. .readfn = vfio_virt_config_read,
  288. .writefn = vfio_virt_config_write
  289. };
  290. static void free_perm_bits(struct perm_bits *perm)
  291. {
  292. kfree(perm->virt);
  293. kfree(perm->write);
  294. perm->virt = NULL;
  295. perm->write = NULL;
  296. }
  297. static int alloc_perm_bits(struct perm_bits *perm, int size)
  298. {
  299. /*
  300. * Round up all permission bits to the next dword, this lets us
  301. * ignore whether a read/write exceeds the defined capability
  302. * structure. We can do this because:
  303. * - Standard config space is already dword aligned
  304. * - Capabilities are all dword aligned (bits 0:1 of next reserved)
  305. * - Express capabilities defined as dword aligned
  306. */
  307. size = round_up(size, 4);
  308. /*
  309. * Zero state is
  310. * - All Readable, None Writeable, None Virtualized
  311. */
  312. perm->virt = kzalloc(size, GFP_KERNEL);
  313. perm->write = kzalloc(size, GFP_KERNEL);
  314. if (!perm->virt || !perm->write) {
  315. free_perm_bits(perm);
  316. return -ENOMEM;
  317. }
  318. perm->readfn = vfio_default_config_read;
  319. perm->writefn = vfio_default_config_write;
  320. return 0;
  321. }
  322. /*
  323. * Helper functions for filling in permission tables
  324. */
  325. static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write)
  326. {
  327. p->virt[off] = virt;
  328. p->write[off] = write;
  329. }
  330. /* Handle endian-ness - pci and tables are little-endian */
  331. static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write)
  332. {
  333. *(__le16 *)(&p->virt[off]) = cpu_to_le16(virt);
  334. *(__le16 *)(&p->write[off]) = cpu_to_le16(write);
  335. }
  336. /* Handle endian-ness - pci and tables are little-endian */
  337. static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
  338. {
  339. *(__le32 *)(&p->virt[off]) = cpu_to_le32(virt);
  340. *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
  341. }
  342. /*
  343. * Restore the *real* BARs after we detect a FLR or backdoor reset.
  344. * (backdoor = some device specific technique that we didn't catch)
  345. */
  346. static void vfio_bar_restore(struct vfio_pci_device *vdev)
  347. {
  348. struct pci_dev *pdev = vdev->pdev;
  349. u32 *rbar = vdev->rbar;
  350. u16 cmd;
  351. int i;
  352. if (pdev->is_virtfn)
  353. return;
  354. pr_info("%s: %s reset recovery - restoring bars\n",
  355. __func__, dev_name(&pdev->dev));
  356. for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++)
  357. pci_user_write_config_dword(pdev, i, *rbar);
  358. pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar);
  359. if (vdev->nointx) {
  360. pci_user_read_config_word(pdev, PCI_COMMAND, &cmd);
  361. cmd |= PCI_COMMAND_INTX_DISABLE;
  362. pci_user_write_config_word(pdev, PCI_COMMAND, cmd);
  363. }
  364. }
  365. static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
  366. {
  367. unsigned long flags = pci_resource_flags(pdev, bar);
  368. u32 val;
  369. if (flags & IORESOURCE_IO)
  370. return cpu_to_le32(PCI_BASE_ADDRESS_SPACE_IO);
  371. val = PCI_BASE_ADDRESS_SPACE_MEMORY;
  372. if (flags & IORESOURCE_PREFETCH)
  373. val |= PCI_BASE_ADDRESS_MEM_PREFETCH;
  374. if (flags & IORESOURCE_MEM_64)
  375. val |= PCI_BASE_ADDRESS_MEM_TYPE_64;
  376. return cpu_to_le32(val);
  377. }
  378. /*
  379. * Pretend we're hardware and tweak the values of the *virtual* PCI BARs
  380. * to reflect the hardware capabilities. This implements BAR sizing.
  381. */
  382. static void vfio_bar_fixup(struct vfio_pci_device *vdev)
  383. {
  384. struct pci_dev *pdev = vdev->pdev;
  385. int i;
  386. __le32 *bar;
  387. u64 mask;
  388. bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
  389. for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) {
  390. if (!pci_resource_start(pdev, i)) {
  391. *bar = 0; /* Unmapped by host = unimplemented to user */
  392. continue;
  393. }
  394. mask = ~(pci_resource_len(pdev, i) - 1);
  395. *bar &= cpu_to_le32((u32)mask);
  396. *bar |= vfio_generate_bar_flags(pdev, i);
  397. if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
  398. bar++;
  399. *bar &= cpu_to_le32((u32)(mask >> 32));
  400. i++;
  401. }
  402. }
  403. bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
  404. /*
  405. * NB. REGION_INFO will have reported zero size if we weren't able
  406. * to read the ROM, but we still return the actual BAR size here if
  407. * it exists (or the shadow ROM space).
  408. */
  409. if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
  410. mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
  411. mask |= PCI_ROM_ADDRESS_ENABLE;
  412. *bar &= cpu_to_le32((u32)mask);
  413. } else if (pdev->resource[PCI_ROM_RESOURCE].flags &
  414. IORESOURCE_ROM_SHADOW) {
  415. mask = ~(0x20000 - 1);
  416. mask |= PCI_ROM_ADDRESS_ENABLE;
  417. *bar &= cpu_to_le32((u32)mask);
  418. } else
  419. *bar = 0;
  420. vdev->bardirty = false;
  421. }
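Because the BARs are fully virtualized and vfio_bar_fixup() applies the size mask on every read, a user driver can size them with the usual write-ones/read-back sequence without ever touching the physical registers. A minimal sketch for a 32-bit memory BAR, where cfg_read32()/cfg_write32() stand in for whatever config accessors the user driver has (hypothetical names, not part of this file):

#include <stdint.h>

static uint32_t bar_size(int bar_off,
			 uint32_t (*cfg_read32)(int off),
			 void (*cfg_write32)(int off, uint32_t val))
{
	uint32_t orig = cfg_read32(bar_off);
	uint32_t mask;

	cfg_write32(bar_off, 0xFFFFFFFFU);	/* ask for the size mask */
	mask = cfg_read32(bar_off) & ~0xFU;	/* drop the memory flag bits */
	cfg_write32(bar_off, orig);		/* restore the original address */

	return mask ? ~mask + 1 : 0;		/* e.g. 0xFFFF0000 -> 64 KB */
}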
  422. static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
  423. int count, struct perm_bits *perm,
  424. int offset, __le32 *val)
  425. {
  426. if (is_bar(offset)) /* pos == offset for basic config */
  427. vfio_bar_fixup(vdev);
  428. count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
  429. /* Mask in virtual memory enable for SR-IOV devices */
  430. if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
  431. u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
  432. u32 tmp_val = le32_to_cpu(*val);
  433. tmp_val |= cmd & PCI_COMMAND_MEMORY;
  434. *val = cpu_to_le32(tmp_val);
  435. }
  436. return count;
  437. }
  438. /* Test whether BARs match the value we think they should contain */
  439. static bool vfio_need_bar_restore(struct vfio_pci_device *vdev)
  440. {
  441. int i = 0, pos = PCI_BASE_ADDRESS_0, ret;
  442. u32 bar;
  443. for (; pos <= PCI_BASE_ADDRESS_5; i++, pos += 4) {
  444. if (vdev->rbar[i]) {
  445. ret = pci_user_read_config_dword(vdev->pdev, pos, &bar);
  446. if (ret || vdev->rbar[i] != bar)
  447. return true;
  448. }
  449. }
  450. return false;
  451. }
  452. static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
  453. int count, struct perm_bits *perm,
  454. int offset, __le32 val)
  455. {
  456. struct pci_dev *pdev = vdev->pdev;
  457. __le16 *virt_cmd;
  458. u16 new_cmd = 0;
  459. int ret;
  460. virt_cmd = (__le16 *)&vdev->vconfig[PCI_COMMAND];
  461. if (offset == PCI_COMMAND) {
  462. bool phys_mem, virt_mem, new_mem, phys_io, virt_io, new_io;
  463. u16 phys_cmd;
  464. ret = pci_user_read_config_word(pdev, PCI_COMMAND, &phys_cmd);
  465. if (ret)
  466. return ret;
  467. new_cmd = le32_to_cpu(val);
  468. phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
  469. virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
  470. new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
  471. phys_io = !!(phys_cmd & PCI_COMMAND_IO);
  472. virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
  473. new_io = !!(new_cmd & PCI_COMMAND_IO);
  474. /*
  475. * If the user is writing mem/io enable (new_mem/io) and we
  476. * think it's already enabled (virt_mem/io), but the hardware
  477. * shows it disabled (phys_mem/io), then the device has
  478. * undergone some kind of backdoor reset and needs to be
  479. * restored before we allow it to enable the bars.
  480. * SR-IOV devices will trigger this, but we catch them later
  481. */
  482. if ((new_mem && virt_mem && !phys_mem) ||
  483. (new_io && virt_io && !phys_io) ||
  484. vfio_need_bar_restore(vdev))
  485. vfio_bar_restore(vdev);
  486. }
  487. count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
  488. if (count < 0)
  489. return count;
  490. /*
  491. * Save current memory/io enable bits in vconfig to allow for
  492. * the test above next time.
  493. */
  494. if (offset == PCI_COMMAND) {
  495. u16 mask = PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
  496. *virt_cmd &= cpu_to_le16(~mask);
  497. *virt_cmd |= cpu_to_le16(new_cmd & mask);
  498. }
  499. /* Emulate INTx disable */
  500. if (offset >= PCI_COMMAND && offset <= PCI_COMMAND + 1) {
  501. bool virt_intx_disable;
  502. virt_intx_disable = !!(le16_to_cpu(*virt_cmd) &
  503. PCI_COMMAND_INTX_DISABLE);
  504. if (virt_intx_disable && !vdev->virq_disabled) {
  505. vdev->virq_disabled = true;
  506. vfio_pci_intx_mask(vdev);
  507. } else if (!virt_intx_disable && vdev->virq_disabled) {
  508. vdev->virq_disabled = false;
  509. vfio_pci_intx_unmask(vdev);
  510. }
  511. }
  512. if (is_bar(offset))
  513. vdev->bardirty = true;
  514. return count;
  515. }
  516. /* Permissions for the Basic PCI Header */
  517. static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
  518. {
  519. if (alloc_perm_bits(perm, PCI_STD_HEADER_SIZEOF))
  520. return -ENOMEM;
  521. perm->readfn = vfio_basic_config_read;
  522. perm->writefn = vfio_basic_config_write;
  523. /* Virtualized for SR-IOV functions, which just have FFFF */
  524. p_setw(perm, PCI_VENDOR_ID, (u16)ALL_VIRT, NO_WRITE);
  525. p_setw(perm, PCI_DEVICE_ID, (u16)ALL_VIRT, NO_WRITE);
  526. /*
  527. * Virtualize INTx disable, we use it internally for interrupt
  528. * control and can emulate it for non-PCI 2.3 devices.
  529. */
  530. p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE);
  531. /* Virtualize capability list, we might want to skip/disable */
  532. p_setw(perm, PCI_STATUS, PCI_STATUS_CAP_LIST, NO_WRITE);
  533. /* No harm to write */
  534. p_setb(perm, PCI_CACHE_LINE_SIZE, NO_VIRT, (u8)ALL_WRITE);
  535. p_setb(perm, PCI_LATENCY_TIMER, NO_VIRT, (u8)ALL_WRITE);
  536. p_setb(perm, PCI_BIST, NO_VIRT, (u8)ALL_WRITE);
  537. /* Virtualize all bars, can't touch the real ones */
  538. p_setd(perm, PCI_BASE_ADDRESS_0, ALL_VIRT, ALL_WRITE);
  539. p_setd(perm, PCI_BASE_ADDRESS_1, ALL_VIRT, ALL_WRITE);
  540. p_setd(perm, PCI_BASE_ADDRESS_2, ALL_VIRT, ALL_WRITE);
  541. p_setd(perm, PCI_BASE_ADDRESS_3, ALL_VIRT, ALL_WRITE);
  542. p_setd(perm, PCI_BASE_ADDRESS_4, ALL_VIRT, ALL_WRITE);
  543. p_setd(perm, PCI_BASE_ADDRESS_5, ALL_VIRT, ALL_WRITE);
  544. p_setd(perm, PCI_ROM_ADDRESS, ALL_VIRT, ALL_WRITE);
  545. /* Allow us to adjust capability chain */
  546. p_setb(perm, PCI_CAPABILITY_LIST, (u8)ALL_VIRT, NO_WRITE);
  547. /* Sometimes used by sw, just virtualize */
  548. p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE);
  549. /* Virtualize interrupt pin to allow hiding INTx */
  550. p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE);
  551. return 0;
  552. }
  553. static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos,
  554. int count, struct perm_bits *perm,
  555. int offset, __le32 val)
  556. {
  557. count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
  558. if (count < 0)
  559. return count;
  560. if (offset == PCI_PM_CTRL) {
  561. pci_power_t state;
  562. switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) {
  563. case 0:
  564. state = PCI_D0;
  565. break;
  566. case 1:
  567. state = PCI_D1;
  568. break;
  569. case 2:
  570. state = PCI_D2;
  571. break;
  572. case 3:
  573. state = PCI_D3hot;
  574. break;
  575. }
  576. pci_set_power_state(vdev->pdev, state);
  577. }
  578. return count;
  579. }
  580. /* Permissions for the Power Management capability */
  581. static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
  582. {
  583. if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
  584. return -ENOMEM;
  585. perm->writefn = vfio_pm_config_write;
  586. /*
  587. * We always virtualize the next field so we can remove
  588. * capabilities from the chain if we want to.
  589. */
  590. p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
  591. /*
  592. * Power management is defined *per function*, so we can let
  593. * the user change power state, but we trap and initiate the
  594. * change ourselves, so the state bits are read-only.
  595. */
  596. p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK);
  597. return 0;
  598. }
  599. static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos,
  600. int count, struct perm_bits *perm,
  601. int offset, __le32 val)
  602. {
  603. struct pci_dev *pdev = vdev->pdev;
  604. __le16 *paddr = (__le16 *)(vdev->vconfig + pos - offset + PCI_VPD_ADDR);
  605. __le32 *pdata = (__le32 *)(vdev->vconfig + pos - offset + PCI_VPD_DATA);
  606. u16 addr;
  607. u32 data;
  608. /*
  609. * Write through to emulation. If the write includes the upper byte
  610. * of PCI_VPD_ADDR, then the PCI_VPD_ADDR_F bit is written and we
  611. * have work to do.
  612. */
  613. count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
  614. if (count < 0 || offset > PCI_VPD_ADDR + 1 ||
  615. offset + count <= PCI_VPD_ADDR + 1)
  616. return count;
  617. addr = le16_to_cpu(*paddr);
  618. if (addr & PCI_VPD_ADDR_F) {
  619. data = le32_to_cpu(*pdata);
  620. if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
  621. return count;
  622. } else {
  623. data = 0;
  624. if (pci_read_vpd(pdev, addr, 4, &data) < 0)
  625. return count;
  626. *pdata = cpu_to_le32(data);
  627. }
  628. /*
  629. * Toggle PCI_VPD_ADDR_F in the emulated PCI_VPD_ADDR register to
  630. * signal completion. If an error occurs above, we assume that not
  631. * toggling this bit will induce a driver timeout.
  632. */
  633. addr ^= PCI_VPD_ADDR_F;
  634. *paddr = cpu_to_le16(addr);
  635. return count;
  636. }
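For reference, this is the sequence a guest driver performs against the two virtualized registers above to read one VPD dword; offsets are relative to the VPD capability, and cfg_read16()/cfg_write16()/cfg_read32() are hypothetical config accessors, so this is a sketch of the protocol rather than code from this driver:

#include <stdint.h>

static uint32_t vpd_read_dword(uint16_t vpd_addr,
			       uint16_t (*cfg_read16)(int off),
			       void (*cfg_write16)(int off, uint16_t val),
			       uint32_t (*cfg_read32)(int off))
{
	/* Start a read: write the address with PCI_VPD_ADDR_F (bit 15) clear */
	cfg_write16(2 /* PCI_VPD_ADDR */, vpd_addr & 0x7FFF);

	/* The device sets the flag when the data register is valid; with the
	 * emulation above this happens synchronously on the write itself.
	 */
	while (!(cfg_read16(2 /* PCI_VPD_ADDR */) & 0x8000))
		;

	return cfg_read32(4 /* PCI_VPD_DATA */);
}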
  637. /* Permissions for Vital Product Data capability */
  638. static int __init init_pci_cap_vpd_perm(struct perm_bits *perm)
  639. {
  640. if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_VPD]))
  641. return -ENOMEM;
  642. perm->writefn = vfio_vpd_config_write;
  643. /*
  644. * We always virtualize the next field so we can remove
  645. * capabilities from the chain if we want to.
  646. */
  647. p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
  648. /*
  649. * Both the address and data registers are virtualized to
  650. * enable access through the pci_vpd_read/write functions
  651. */
  652. p_setw(perm, PCI_VPD_ADDR, (u16)ALL_VIRT, (u16)ALL_WRITE);
  653. p_setd(perm, PCI_VPD_DATA, ALL_VIRT, ALL_WRITE);
  654. return 0;
  655. }
  656. /* Permissions for PCI-X capability */
  657. static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
  658. {
  659. /* Alloc 24, but only 8 are used in v0 */
  660. if (alloc_perm_bits(perm, PCI_CAP_PCIX_SIZEOF_V2))
  661. return -ENOMEM;
  662. p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
  663. p_setw(perm, PCI_X_CMD, NO_VIRT, (u16)ALL_WRITE);
  664. p_setd(perm, PCI_X_ECC_CSR, NO_VIRT, ALL_WRITE);
  665. return 0;
  666. }
  667. static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
  668. int count, struct perm_bits *perm,
  669. int offset, __le32 val)
  670. {
  671. __le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
  672. offset + PCI_EXP_DEVCTL);
  673. count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
  674. if (count < 0)
  675. return count;
  676. /*
  677. * The FLR bit is virtualized, if set and the device supports PCIe
  678. * FLR, issue a reset_function. Regardless, clear the bit, the spec
  679. * requires it to be always read as zero. NB, reset_function might
  680. * not use a PCIe FLR, we don't have that level of granularity.
  681. */
  682. if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
  683. u32 cap;
  684. int ret;
  685. *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
  686. ret = pci_user_read_config_dword(vdev->pdev,
  687. pos - offset + PCI_EXP_DEVCAP,
  688. &cap);
  689. if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
  690. pci_try_reset_function(vdev->pdev);
  691. }
  692. return count;
  693. }
  694. /* Permissions for PCI Express capability */
  695. static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
  696. {
  697. /* Alloc largest of possible sizes */
  698. if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
  699. return -ENOMEM;
  700. perm->writefn = vfio_exp_config_write;
  701. p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
  702. /*
  703. * Allow writes to device control fields, except devctl_phantom,
  704. * which could confuse IOMMU, and the ARI bit in devctl2, which
  705. * is set at probe time. FLR gets virtualized via our writefn.
  706. */
  707. p_setw(perm, PCI_EXP_DEVCTL,
  708. PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM);
  709. p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
  710. return 0;
  711. }
  712. static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
  713. int count, struct perm_bits *perm,
  714. int offset, __le32 val)
  715. {
  716. u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
  717. count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
  718. if (count < 0)
  719. return count;
  720. /*
  721. * The FLR bit is virtualized, if set and the device supports AF
  722. * FLR, issue a reset_function. Regardless, clear the bit, the spec
  723. * requires it to be always read as zero. NB, reset_function might
  724. * not use an AF FLR, we don't have that level of granularity.
  725. */
  726. if (*ctrl & PCI_AF_CTRL_FLR) {
  727. u8 cap;
  728. int ret;
  729. *ctrl &= ~PCI_AF_CTRL_FLR;
  730. ret = pci_user_read_config_byte(vdev->pdev,
  731. pos - offset + PCI_AF_CAP,
  732. &cap);
  733. if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
  734. pci_try_reset_function(vdev->pdev);
  735. }
  736. return count;
  737. }
  738. /* Permissions for Advanced Function capability */
  739. static int __init init_pci_cap_af_perm(struct perm_bits *perm)
  740. {
  741. if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
  742. return -ENOMEM;
  743. perm->writefn = vfio_af_config_write;
  744. p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
  745. p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
  746. return 0;
  747. }
  748. /* Permissions for Advanced Error Reporting extended capability */
  749. static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm)
  750. {
  751. u32 mask;
  752. if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_ERR]))
  753. return -ENOMEM;
  754. /*
  755. * Virtualize the first dword of all express capabilities
  756. * because it includes the next pointer. This lets us later
  757. * remove capabilities from the chain if we need to.
  758. */
  759. p_setd(perm, 0, ALL_VIRT, NO_WRITE);
  760. /* Writable bits mask */
  761. mask = PCI_ERR_UNC_UND | /* Undefined */
  762. PCI_ERR_UNC_DLP | /* Data Link Protocol */
  763. PCI_ERR_UNC_SURPDN | /* Surprise Down */
  764. PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */
  765. PCI_ERR_UNC_FCP | /* Flow Control Protocol */
  766. PCI_ERR_UNC_COMP_TIME | /* Completion Timeout */
  767. PCI_ERR_UNC_COMP_ABORT | /* Completer Abort */
  768. PCI_ERR_UNC_UNX_COMP | /* Unexpected Completion */
  769. PCI_ERR_UNC_RX_OVER | /* Receiver Overflow */
  770. PCI_ERR_UNC_MALF_TLP | /* Malformed TLP */
  771. PCI_ERR_UNC_ECRC | /* ECRC Error Status */
  772. PCI_ERR_UNC_UNSUP | /* Unsupported Request */
  773. PCI_ERR_UNC_ACSV | /* ACS Violation */
  774. PCI_ERR_UNC_INTN | /* internal error */
  775. PCI_ERR_UNC_MCBTLP | /* MC blocked TLP */
  776. PCI_ERR_UNC_ATOMEG | /* Atomic egress blocked */
  777. PCI_ERR_UNC_TLPPRE; /* TLP prefix blocked */
  778. p_setd(perm, PCI_ERR_UNCOR_STATUS, NO_VIRT, mask);
  779. p_setd(perm, PCI_ERR_UNCOR_MASK, NO_VIRT, mask);
  780. p_setd(perm, PCI_ERR_UNCOR_SEVER, NO_VIRT, mask);
  781. mask = PCI_ERR_COR_RCVR | /* Receiver Error Status */
  782. PCI_ERR_COR_BAD_TLP | /* Bad TLP Status */
  783. PCI_ERR_COR_BAD_DLLP | /* Bad DLLP Status */
  784. PCI_ERR_COR_REP_ROLL | /* REPLAY_NUM Rollover */
  785. PCI_ERR_COR_REP_TIMER | /* Replay Timer Timeout */
  786. PCI_ERR_COR_ADV_NFAT | /* Advisory Non-Fatal */
  787. PCI_ERR_COR_INTERNAL | /* Corrected Internal */
  788. PCI_ERR_COR_LOG_OVER; /* Header Log Overflow */
  789. p_setd(perm, PCI_ERR_COR_STATUS, NO_VIRT, mask);
  790. p_setd(perm, PCI_ERR_COR_MASK, NO_VIRT, mask);
  791. mask = PCI_ERR_CAP_ECRC_GENE | /* ECRC Generation Enable */
  792. PCI_ERR_CAP_ECRC_CHKE; /* ECRC Check Enable */
  793. p_setd(perm, PCI_ERR_CAP, NO_VIRT, mask);
  794. return 0;
  795. }
  796. /* Permissions for Power Budgeting extended capability */
  797. static int __init init_pci_ext_cap_pwr_perm(struct perm_bits *perm)
  798. {
  799. if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_PWR]))
  800. return -ENOMEM;
  801. p_setd(perm, 0, ALL_VIRT, NO_WRITE);
  802. /* Writing the data selector is OK, the info is still read-only */
  803. p_setb(perm, PCI_PWR_DATA, NO_VIRT, (u8)ALL_WRITE);
  804. return 0;
  805. }
  806. /*
  807. * Initialize the shared permission tables
  808. */
  809. void vfio_pci_uninit_perm_bits(void)
  810. {
  811. free_perm_bits(&cap_perms[PCI_CAP_ID_BASIC]);
  812. free_perm_bits(&cap_perms[PCI_CAP_ID_PM]);
  813. free_perm_bits(&cap_perms[PCI_CAP_ID_VPD]);
  814. free_perm_bits(&cap_perms[PCI_CAP_ID_PCIX]);
  815. free_perm_bits(&cap_perms[PCI_CAP_ID_EXP]);
  816. free_perm_bits(&cap_perms[PCI_CAP_ID_AF]);
  817. free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
  818. free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
  819. }
  820. int __init vfio_pci_init_perm_bits(void)
  821. {
  822. int ret;
  823. /* Basic config space */
  824. ret = init_pci_cap_basic_perm(&cap_perms[PCI_CAP_ID_BASIC]);
  825. /* Capabilities */
  826. ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]);
  827. ret |= init_pci_cap_vpd_perm(&cap_perms[PCI_CAP_ID_VPD]);
  828. ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]);
  829. cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write;
  830. ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]);
  831. ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]);
  832. /* Extended capabilities */
  833. ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
  834. ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
  835. ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write;
  836. if (ret)
  837. vfio_pci_uninit_perm_bits();
  838. return ret;
  839. }
  840. static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
  841. {
  842. u8 cap;
  843. int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
  844. PCI_STD_HEADER_SIZEOF;
  845. cap = vdev->pci_config_map[pos];
  846. if (cap == PCI_CAP_ID_BASIC)
  847. return 0;
  848. /* XXX Can we have two abutting capabilities of the same type? */
  849. while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap)
  850. pos--;
  851. return pos;
  852. }
  853. static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos,
  854. int count, struct perm_bits *perm,
  855. int offset, __le32 *val)
  856. {
  857. /* Update max available queue size from msi_qmax */
  858. if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
  859. __le16 *flags;
  860. int start;
  861. start = vfio_find_cap_start(vdev, pos);
  862. flags = (__le16 *)&vdev->vconfig[start];
  863. *flags &= cpu_to_le16(~PCI_MSI_FLAGS_QMASK);
  864. *flags |= cpu_to_le16(vdev->msi_qmax << 1);
  865. }
  866. return vfio_default_config_read(vdev, pos, count, perm, offset, val);
  867. }
  868. static int vfio_msi_config_write(struct vfio_pci_device *vdev, int pos,
  869. int count, struct perm_bits *perm,
  870. int offset, __le32 val)
  871. {
  872. count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
  873. if (count < 0)
  874. return count;
  875. /* Fixup and write configured queue size and enable to hardware */
  876. if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
  877. __le16 *pflags;
  878. u16 flags;
  879. int start, ret;
  880. start = vfio_find_cap_start(vdev, pos);
  881. pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS];
  882. flags = le16_to_cpu(*pflags);
  883. /* MSI is enabled via ioctl */
  884. if (!is_msi(vdev))
  885. flags &= ~PCI_MSI_FLAGS_ENABLE;
  886. /* Check queue size */
  887. if ((flags & PCI_MSI_FLAGS_QSIZE) >> 4 > vdev->msi_qmax) {
  888. flags &= ~PCI_MSI_FLAGS_QSIZE;
  889. flags |= vdev->msi_qmax << 4;
  890. }
  891. /* Write back to virt and to hardware */
  892. *pflags = cpu_to_le16(flags);
  893. ret = pci_user_write_config_word(vdev->pdev,
  894. start + PCI_MSI_FLAGS,
  895. flags);
  896. if (ret)
  897. return ret;
  898. }
  899. return count;
  900. }
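The queue-size fields being clamped here are log2 encodings: QMASK (bits 3:1) advertises how many vectors the device supports and QSIZE (bits 6:4) is what the driver enables. A small stand-alone example of the clamp, assuming msi_qmax is 3 (8 vectors) and using raw masks in place of the PCI_MSI_FLAGS_* macros:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t msi_qmax = 3;			/* log2(8 supported vectors) */
	uint16_t flags = (4 << 4) | 0x0001;	/* driver asks for 16 vectors, enable set */

	if (((flags & 0x70) >> 4) > msi_qmax) {	/* QSIZE exceeds what we expose */
		flags &= ~0x70;
		flags |= msi_qmax << 4;
	}
	printf("effective flags 0x%04x -> %u vectors enabled\n",
	       flags, 1u << ((flags & 0x70) >> 4));
	return 0;
}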
  901. /*
  902. * MSI determination is per-device, so this routine gets used beyond
  903. * initialization time. Don't add __init
  904. */
  905. static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags)
  906. {
  907. if (alloc_perm_bits(perm, len))
  908. return -ENOMEM;
  909. perm->readfn = vfio_msi_config_read;
  910. perm->writefn = vfio_msi_config_write;
  911. p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
  912. /*
  913. * The upper byte of the control register is reserved,
  914. * just setup the lower byte.
  915. */
  916. p_setb(perm, PCI_MSI_FLAGS, (u8)ALL_VIRT, (u8)ALL_WRITE);
  917. p_setd(perm, PCI_MSI_ADDRESS_LO, ALL_VIRT, ALL_WRITE);
  918. if (flags & PCI_MSI_FLAGS_64BIT) {
  919. p_setd(perm, PCI_MSI_ADDRESS_HI, ALL_VIRT, ALL_WRITE);
  920. p_setw(perm, PCI_MSI_DATA_64, (u16)ALL_VIRT, (u16)ALL_WRITE);
  921. if (flags & PCI_MSI_FLAGS_MASKBIT) {
  922. p_setd(perm, PCI_MSI_MASK_64, NO_VIRT, ALL_WRITE);
  923. p_setd(perm, PCI_MSI_PENDING_64, NO_VIRT, ALL_WRITE);
  924. }
  925. } else {
  926. p_setw(perm, PCI_MSI_DATA_32, (u16)ALL_VIRT, (u16)ALL_WRITE);
  927. if (flags & PCI_MSI_FLAGS_MASKBIT) {
  928. p_setd(perm, PCI_MSI_MASK_32, NO_VIRT, ALL_WRITE);
  929. p_setd(perm, PCI_MSI_PENDING_32, NO_VIRT, ALL_WRITE);
  930. }
  931. }
  932. return 0;
  933. }
  934. /* Determine MSI CAP field length; initialize msi_perms on 1st call per vdev */
  935. static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos)
  936. {
  937. struct pci_dev *pdev = vdev->pdev;
  938. int len, ret;
  939. u16 flags;
  940. ret = pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &flags);
  941. if (ret)
  942. return pcibios_err_to_errno(ret);
  943. len = 10; /* Minimum size */
  944. if (flags & PCI_MSI_FLAGS_64BIT)
  945. len += 4;
  946. if (flags & PCI_MSI_FLAGS_MASKBIT)
  947. len += 10;
  948. if (vdev->msi_perm)
  949. return len;
  950. vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL);
  951. if (!vdev->msi_perm)
  952. return -ENOMEM;
  953. ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags);
  954. if (ret)
  955. return ret;
  956. return len;
  957. }
  958. /* Determine extended capability length for VC (2 & 9) and MFVC */
  959. static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
  960. {
  961. struct pci_dev *pdev = vdev->pdev;
  962. u32 tmp;
  963. int ret, evcc, phases, vc_arb;
  964. int len = PCI_CAP_VC_BASE_SIZEOF;
  965. ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
  966. if (ret)
  967. return pcibios_err_to_errno(ret);
  968. evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */
  969. ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
  970. if (ret)
  971. return pcibios_err_to_errno(ret);
  972. if (tmp & PCI_VC_CAP2_128_PHASE)
  973. phases = 128;
  974. else if (tmp & PCI_VC_CAP2_64_PHASE)
  975. phases = 64;
  976. else if (tmp & PCI_VC_CAP2_32_PHASE)
  977. phases = 32;
  978. else
  979. phases = 0;
  980. vc_arb = phases * 4;
  981. /*
  982. * Port arbitration tables are root & switch only;
  983. * function arbitration tables are function 0 only.
  984. * In either case, we'll never let user write them so
  985. * we don't care how big they are
  986. */
  987. len += (1 + evcc) * PCI_CAP_VC_PER_VC_SIZEOF;
  988. if (vc_arb) {
  989. len = round_up(len, 16);
  990. len += vc_arb / 8;
  991. }
  992. return len;
  993. }
  994. static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
  995. {
  996. struct pci_dev *pdev = vdev->pdev;
  997. u32 dword;
  998. u16 word;
  999. u8 byte;
  1000. int ret;
  1001. switch (cap) {
  1002. case PCI_CAP_ID_MSI:
  1003. return vfio_msi_cap_len(vdev, pos);
  1004. case PCI_CAP_ID_PCIX:
  1005. ret = pci_read_config_word(pdev, pos + PCI_X_CMD, &word);
  1006. if (ret)
  1007. return pcibios_err_to_errno(ret);
  1008. if (PCI_X_CMD_VERSION(word)) {
  1009. if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
  1010. /* Test for extended capabilities */
  1011. pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE,
  1012. &dword);
  1013. vdev->extended_caps = (dword != 0);
  1014. }
  1015. return PCI_CAP_PCIX_SIZEOF_V2;
  1016. } else
  1017. return PCI_CAP_PCIX_SIZEOF_V0;
  1018. case PCI_CAP_ID_VNDR:
  1019. /* length follows next field */
  1020. ret = pci_read_config_byte(pdev, pos + PCI_CAP_FLAGS, &byte);
  1021. if (ret)
  1022. return pcibios_err_to_errno(ret);
  1023. return byte;
  1024. case PCI_CAP_ID_EXP:
  1025. if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
  1026. /* Test for extended capabilities */
  1027. pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
  1028. vdev->extended_caps = (dword != 0);
  1029. }
  1030. /* length based on version and type */
  1031. if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) {
  1032. if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
  1033. return 0xc; /* "All Devices" only, no link */
  1034. return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
  1035. } else {
  1036. if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
  1037. return 0x2c; /* No link */
  1038. return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
  1039. }
  1040. case PCI_CAP_ID_HT:
  1041. ret = pci_read_config_byte(pdev, pos + 3, &byte);
  1042. if (ret)
  1043. return pcibios_err_to_errno(ret);
  1044. return (byte & HT_3BIT_CAP_MASK) ?
  1045. HT_CAP_SIZEOF_SHORT : HT_CAP_SIZEOF_LONG;
  1046. case PCI_CAP_ID_SATA:
  1047. ret = pci_read_config_byte(pdev, pos + PCI_SATA_REGS, &byte);
  1048. if (ret)
  1049. return pcibios_err_to_errno(ret);
  1050. byte &= PCI_SATA_REGS_MASK;
  1051. if (byte == PCI_SATA_REGS_INLINE)
  1052. return PCI_SATA_SIZEOF_LONG;
  1053. else
  1054. return PCI_SATA_SIZEOF_SHORT;
  1055. default:
  1056. pr_warn("%s: %s unknown length for pci cap 0x%x@0x%x\n",
  1057. dev_name(&pdev->dev), __func__, cap, pos);
  1058. }
  1059. return 0;
  1060. }
  1061. static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
  1062. {
  1063. struct pci_dev *pdev = vdev->pdev;
  1064. u8 byte;
  1065. u32 dword;
  1066. int ret;
  1067. switch (ecap) {
  1068. case PCI_EXT_CAP_ID_VNDR:
  1069. ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword);
  1070. if (ret)
  1071. return pcibios_err_to_errno(ret);
  1072. return dword >> PCI_VSEC_HDR_LEN_SHIFT;
  1073. case PCI_EXT_CAP_ID_VC:
  1074. case PCI_EXT_CAP_ID_VC9:
  1075. case PCI_EXT_CAP_ID_MFVC:
  1076. return vfio_vc_cap_len(vdev, epos);
  1077. case PCI_EXT_CAP_ID_ACS:
  1078. ret = pci_read_config_byte(pdev, epos + PCI_ACS_CAP, &byte);
  1079. if (ret)
  1080. return pcibios_err_to_errno(ret);
  1081. if (byte & PCI_ACS_EC) {
  1082. int bits;
  1083. ret = pci_read_config_byte(pdev,
  1084. epos + PCI_ACS_EGRESS_BITS,
  1085. &byte);
  1086. if (ret)
  1087. return pcibios_err_to_errno(ret);
  1088. bits = byte ? round_up(byte, 32) : 256;
  1089. return 8 + (bits / 8);
  1090. }
  1091. return 8;
  1092. case PCI_EXT_CAP_ID_REBAR:
  1093. ret = pci_read_config_byte(pdev, epos + PCI_REBAR_CTRL, &byte);
  1094. if (ret)
  1095. return pcibios_err_to_errno(ret);
  1096. byte &= PCI_REBAR_CTRL_NBAR_MASK;
  1097. byte >>= PCI_REBAR_CTRL_NBAR_SHIFT;
  1098. return 4 + (byte * 8);
  1099. case PCI_EXT_CAP_ID_DPA:
  1100. ret = pci_read_config_byte(pdev, epos + PCI_DPA_CAP, &byte);
  1101. if (ret)
  1102. return pcibios_err_to_errno(ret);
  1103. byte &= PCI_DPA_CAP_SUBSTATE_MASK;
  1104. return PCI_DPA_BASE_SIZEOF + byte + 1;
  1105. case PCI_EXT_CAP_ID_TPH:
  1106. ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
  1107. if (ret)
  1108. return pcibios_err_to_errno(ret);
  1109. if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
  1110. int sts;
  1111. sts = dword & PCI_TPH_CAP_ST_MASK;
  1112. sts >>= PCI_TPH_CAP_ST_SHIFT;
  1113. return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2;
  1114. }
  1115. return PCI_TPH_BASE_SIZEOF;
  1116. default:
  1117. pr_warn("%s: %s unknown length for pci ecap 0x%x@0x%x\n",
  1118. dev_name(&pdev->dev), __func__, ecap, epos);
  1119. }
  1120. return 0;
  1121. }
  1122. static int vfio_fill_vconfig_bytes(struct vfio_pci_device *vdev,
  1123. int offset, int size)
  1124. {
  1125. struct pci_dev *pdev = vdev->pdev;
  1126. int ret = 0;
  1127. /*
  1128. * We try to read physical config space in the largest chunks
  1129. * we can, assuming that all of the fields support dword access.
  1130. * pci_save_state() makes this same assumption and seems to do ok.
  1131. */
  1132. while (size) {
  1133. int filled;
  1134. if (size >= 4 && !(offset % 4)) {
  1135. __le32 *dwordp = (__le32 *)&vdev->vconfig[offset];
  1136. u32 dword;
  1137. ret = pci_read_config_dword(pdev, offset, &dword);
  1138. if (ret)
  1139. return ret;
  1140. *dwordp = cpu_to_le32(dword);
  1141. filled = 4;
  1142. } else if (size >= 2 && !(offset % 2)) {
  1143. __le16 *wordp = (__le16 *)&vdev->vconfig[offset];
  1144. u16 word;
  1145. ret = pci_read_config_word(pdev, offset, &word);
  1146. if (ret)
  1147. return ret;
  1148. *wordp = cpu_to_le16(word);
  1149. filled = 2;
  1150. } else {
  1151. u8 *byte = &vdev->vconfig[offset];
  1152. ret = pci_read_config_byte(pdev, offset, byte);
  1153. if (ret)
  1154. return ret;
  1155. filled = 1;
  1156. }
  1157. offset += filled;
  1158. size -= filled;
  1159. }
  1160. return ret;
  1161. }
  1162. static int vfio_cap_init(struct vfio_pci_device *vdev)
  1163. {
  1164. struct pci_dev *pdev = vdev->pdev;
  1165. u8 *map = vdev->pci_config_map;
  1166. u16 status;
  1167. u8 pos, *prev, cap;
  1168. int loops, ret, caps = 0;
  1169. /* Any capabilities? */
  1170. ret = pci_read_config_word(pdev, PCI_STATUS, &status);
  1171. if (ret)
  1172. return ret;
  1173. if (!(status & PCI_STATUS_CAP_LIST))
  1174. return 0; /* Done */
  1175. ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
  1176. if (ret)
  1177. return ret;
  1178. /* Mark the previous position in case we want to skip a capability */
  1179. prev = &vdev->vconfig[PCI_CAPABILITY_LIST];
  1180. /* We can bound our loop, capabilities are dword aligned */
  1181. loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF;
  1182. while (pos && loops--) {
  1183. u8 next;
  1184. int i, len = 0;
  1185. ret = pci_read_config_byte(pdev, pos, &cap);
  1186. if (ret)
  1187. return ret;
  1188. ret = pci_read_config_byte(pdev,
  1189. pos + PCI_CAP_LIST_NEXT, &next);
  1190. if (ret)
  1191. return ret;
  1192. if (cap <= PCI_CAP_ID_MAX) {
  1193. len = pci_cap_length[cap];
  1194. if (len == 0xFF) { /* Variable length */
  1195. len = vfio_cap_len(vdev, cap, pos);
  1196. if (len < 0)
  1197. return len;
  1198. }
  1199. }
  1200. if (!len) {
  1201. pr_info("%s: %s hiding cap 0x%x\n",
  1202. __func__, dev_name(&pdev->dev), cap);
  1203. *prev = next;
  1204. pos = next;
  1205. continue;
  1206. }
  1207. /* Sanity check, do we overlap other capabilities? */
  1208. for (i = 0; i < len; i++) {
  1209. if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
  1210. continue;
  1211. pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n",
  1212. __func__, dev_name(&pdev->dev),
  1213. pos + i, map[pos + i], cap);
  1214. }
  1215. BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
  1216. memset(map + pos, cap, len);
  1217. ret = vfio_fill_vconfig_bytes(vdev, pos, len);
  1218. if (ret)
  1219. return ret;
  1220. prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT];
  1221. pos = next;
  1222. caps++;
  1223. }
  1224. /* If we didn't fill any capabilities, clear the status flag */
  1225. if (!caps) {
  1226. __le16 *vstatus = (__le16 *)&vdev->vconfig[PCI_STATUS];
  1227. *vstatus &= ~cpu_to_le16(PCI_STATUS_CAP_LIST);
  1228. }
  1229. return 0;
  1230. }
  1231. static int vfio_ecap_init(struct vfio_pci_device *vdev)
  1232. {
  1233. struct pci_dev *pdev = vdev->pdev;
  1234. u8 *map = vdev->pci_config_map;
  1235. u16 epos;
  1236. __le32 *prev = NULL;
  1237. int loops, ret, ecaps = 0;
  1238. if (!vdev->extended_caps)
  1239. return 0;
  1240. epos = PCI_CFG_SPACE_SIZE;
  1241. loops = (pdev->cfg_size - PCI_CFG_SPACE_SIZE) / PCI_CAP_SIZEOF;
  1242. while (loops-- && epos >= PCI_CFG_SPACE_SIZE) {
  1243. u32 header;
  1244. u16 ecap;
  1245. int i, len = 0;
  1246. bool hidden = false;
  1247. ret = pci_read_config_dword(pdev, epos, &header);
  1248. if (ret)
  1249. return ret;
  1250. ecap = PCI_EXT_CAP_ID(header);
  1251. if (ecap <= PCI_EXT_CAP_ID_MAX) {
  1252. len = pci_ext_cap_length[ecap];
  1253. if (len == 0xFF) {
  1254. len = vfio_ext_cap_len(vdev, ecap, epos);
  1255. if (len < 0)
  1256. return len;
  1257. }
  1258. }
  1259. if (!len) {
  1260. pr_info("%s: %s hiding ecap 0x%x@0x%x\n",
  1261. __func__, dev_name(&pdev->dev), ecap, epos);
  1262. /* If not the first in the chain, we can skip over it */
  1263. if (prev) {
  1264. u32 val = epos = PCI_EXT_CAP_NEXT(header);
  1265. *prev &= cpu_to_le32(~(0xffcU << 20));
  1266. *prev |= cpu_to_le32(val << 20);
  1267. continue;
  1268. }
  1269. /*
  1270. * Otherwise, fill in a placeholder, the direct
  1271. * readfn will virtualize this automatically
  1272. */
  1273. len = PCI_CAP_SIZEOF;
  1274. hidden = true;
  1275. }
  1276. for (i = 0; i < len; i++) {
  1277. if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
  1278. continue;
  1279. pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n",
  1280. __func__, dev_name(&pdev->dev),
  1281. epos + i, map[epos + i], ecap);
  1282. }
  1283. /*
  1284. * Even though ecap is 2 bytes, we're currently a long way
  1285. * from exceeding 1 byte capabilities. If we ever make it
  1286. * up to 0xFE we'll need to up this to a two-byte, byte map.
  1287. */
  1288. BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
  1289. memset(map + epos, ecap, len);
  1290. ret = vfio_fill_vconfig_bytes(vdev, epos, len);
  1291. if (ret)
  1292. return ret;
  1293. /*
  1294. * If we're just using this capability to anchor the list,
  1295. * hide the real ID. Only count real ecaps. XXX PCI spec
  1296. * indicates to use cap id = 0, version = 0, next = 0 if
  1297. * ecaps are absent, hope users check all the way to next.
  1298. */
  1299. if (hidden)
  1300. *(__le32 *)&vdev->vconfig[epos] &=
  1301. cpu_to_le32((0xffcU << 20));
  1302. else
  1303. ecaps++;
  1304. prev = (__le32 *)&vdev->vconfig[epos];
  1305. epos = PCI_EXT_CAP_NEXT(header);
  1306. }
  1307. if (!ecaps)
  1308. *(u32 *)&vdev->vconfig[PCI_CFG_SPACE_SIZE] = 0;
  1309. return 0;
  1310. }
  1311. /*
  1312. * For each device we allocate a pci_config_map that indicates the
  1313. * capability occupying each dword and thus the struct perm_bits we
  1314. * use for read and write. We also allocate a virtualized config
  1315. * space which tracks reads and writes to bits that we emulate for
  1316. * the user. Initial values filled from device.
  1317. *
  1318. * Using shared struct perm_bits between all vfio-pci devices saves
  1319. * us from allocating cfg_size buffers for virt and write for every
  1320. * device. We could remove vconfig and allocate individual buffers
  1321. * for each area requiring emulated bits, but the array of pointers
  1322. * would be comparable in size (at least for standard config space).
  1323. */
  1324. int vfio_config_init(struct vfio_pci_device *vdev)
  1325. {
  1326. struct pci_dev *pdev = vdev->pdev;
  1327. u8 *map, *vconfig;
  1328. int ret;
  1329. /*
  1330. * Config space, caps and ecaps are all dword aligned, so we could
  1331. * use one byte per dword to record the type. However, there are
  1332. * no requirements on the length of a capability, so the gap between
  1333. * capabilities needs byte granularity.
  1334. */
  1335. map = kmalloc(pdev->cfg_size, GFP_KERNEL);
  1336. if (!map)
  1337. return -ENOMEM;
  1338. vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL);
  1339. if (!vconfig) {
  1340. kfree(map);
  1341. return -ENOMEM;
  1342. }
  1343. vdev->pci_config_map = map;
  1344. vdev->vconfig = vconfig;
  1345. memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF);
  1346. memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID,
  1347. pdev->cfg_size - PCI_STD_HEADER_SIZEOF);
  1348. ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF);
  1349. if (ret)
  1350. goto out;
  1351. vdev->bardirty = true;
  1352. /*
  1353. * XXX can we just pci_load_saved_state/pci_restore_state?
  1354. * may need to rebuild vconfig after that
  1355. */
  1356. /* For restore after reset */
  1357. vdev->rbar[0] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_0]);
  1358. vdev->rbar[1] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_1]);
  1359. vdev->rbar[2] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_2]);
  1360. vdev->rbar[3] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_3]);
  1361. vdev->rbar[4] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_4]);
  1362. vdev->rbar[5] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_5]);
  1363. vdev->rbar[6] = le32_to_cpu(*(__le32 *)&vconfig[PCI_ROM_ADDRESS]);
  1364. if (pdev->is_virtfn) {
  1365. *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor);
  1366. *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
  1367. }
  1368. if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
  1369. vconfig[PCI_INTERRUPT_PIN] = 0;
  1370. ret = vfio_cap_init(vdev);
  1371. if (ret)
  1372. goto out;
  1373. ret = vfio_ecap_init(vdev);
  1374. if (ret)
  1375. goto out;
  1376. return 0;
  1377. out:
  1378. kfree(map);
  1379. vdev->pci_config_map = NULL;
  1380. kfree(vconfig);
  1381. vdev->vconfig = NULL;
  1382. return pcibios_err_to_errno(ret);
  1383. }
  1384. void vfio_config_free(struct vfio_pci_device *vdev)
  1385. {
  1386. kfree(vdev->vconfig);
  1387. vdev->vconfig = NULL;
  1388. kfree(vdev->pci_config_map);
  1389. vdev->pci_config_map = NULL;
  1390. kfree(vdev->msi_perm);
  1391. vdev->msi_perm = NULL;
  1392. }
  1393. /*
  1394. * Find the remaining number of bytes in a dword that match the given
  1395. * position. Stop at either the end of the capability or the dword boundary.
  1396. */
  1397. static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev,
  1398. loff_t pos)
  1399. {
  1400. u8 cap = vdev->pci_config_map[pos];
  1401. size_t i;
  1402. for (i = 1; (pos + i) % 4 && vdev->pci_config_map[pos + i] == cap; i++)
  1403. /* nop */;
  1404. return i;
  1405. }
  1406. static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
  1407. size_t count, loff_t *ppos, bool iswrite)
  1408. {
  1409. struct pci_dev *pdev = vdev->pdev;
  1410. struct perm_bits *perm;
  1411. __le32 val = 0;
  1412. int cap_start = 0, offset;
  1413. u8 cap_id;
  1414. ssize_t ret;
  1415. if (*ppos < 0 || *ppos >= pdev->cfg_size ||
  1416. *ppos + count > pdev->cfg_size)
  1417. return -EFAULT;
  1418. /*
  1419. * Chop accesses into aligned chunks containing no more than a
  1420. * single capability. Caller increments to the next chunk.
  1421. */
  1422. count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos));
  1423. if (count >= 4 && !(*ppos % 4))
  1424. count = 4;
  1425. else if (count >= 2 && !(*ppos % 2))
  1426. count = 2;
  1427. else
  1428. count = 1;
  1429. ret = count;
  1430. cap_id = vdev->pci_config_map[*ppos];
  1431. if (cap_id == PCI_CAP_ID_INVALID) {
  1432. perm = &unassigned_perms;
  1433. cap_start = *ppos;
  1434. } else if (cap_id == PCI_CAP_ID_INVALID_VIRT) {
  1435. perm = &virt_perms;
  1436. cap_start = *ppos;
  1437. } else {
  1438. if (*ppos >= PCI_CFG_SPACE_SIZE) {
  1439. WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
  1440. perm = &ecap_perms[cap_id];
  1441. cap_start = vfio_find_cap_start(vdev, *ppos);
  1442. } else {
  1443. WARN_ON(cap_id > PCI_CAP_ID_MAX);
  1444. perm = &cap_perms[cap_id];
  1445. if (cap_id == PCI_CAP_ID_MSI)
  1446. perm = vdev->msi_perm;
  1447. if (cap_id > PCI_CAP_ID_BASIC)
  1448. cap_start = vfio_find_cap_start(vdev, *ppos);
  1449. }
  1450. }
  1451. WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC);
  1452. WARN_ON(cap_start > *ppos);
  1453. offset = *ppos - cap_start;
  1454. if (iswrite) {
  1455. if (!perm->writefn)
  1456. return ret;
  1457. if (copy_from_user(&val, buf, count))
  1458. return -EFAULT;
  1459. ret = perm->writefn(vdev, *ppos, count, perm, offset, val);
  1460. } else {
  1461. if (perm->readfn) {
  1462. ret = perm->readfn(vdev, *ppos, count,
  1463. perm, offset, &val);
  1464. if (ret < 0)
  1465. return ret;
  1466. }
  1467. if (copy_to_user(buf, &val, count))
  1468. return -EFAULT;
  1469. }
  1470. return ret;
  1471. }
  1472. ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf,
  1473. size_t count, loff_t *ppos, bool iswrite)
  1474. {
  1475. size_t done = 0;
  1476. int ret = 0;
  1477. loff_t pos = *ppos;
  1478. pos &= VFIO_PCI_OFFSET_MASK;
  1479. while (count) {
  1480. ret = vfio_config_do_rw(vdev, buf, count, &pos, iswrite);
  1481. if (ret < 0)
  1482. return ret;
  1483. count -= ret;
  1484. done += ret;
  1485. buf += ret;
  1486. pos += ret;
  1487. }
  1488. *ppos += done;
  1489. return done;
  1490. }
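From user space, the handlers above are reached through ordinary reads and writes of the VFIO config region; the offset comes from VFIO_DEVICE_GET_REGION_INFO for VFIO_PCI_CONFIG_REGION_INDEX. A minimal sketch, assuming device is an already-open VFIO device file descriptor:

#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

static int read_vendor_id(int device, uint16_t *vendor)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_CONFIG_REGION_INDEX,
	};

	if (ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &info))
		return -1;

	/* PCI_VENDOR_ID is config offset 0; this lands in vfio_pci_config_rw() */
	if (pread(device, vendor, sizeof(*vendor), info.offset) != (ssize_t)sizeof(*vendor))
		return -1;

	return 0;
}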