/*
 * VFIO API definition
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _UAPIVFIO_H
#define _UAPIVFIO_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define VFIO_API_VERSION	0

/* Kernel & User level defines for VFIO IOCTLs. */

/*
 * Extensions: values reported by VFIO_CHECK_EXTENSION to identify which
 * IOMMU backends and features this kernel supports.
 */
#define VFIO_TYPE1_IOMMU		1
#define VFIO_SPAPR_TCE_IOMMU		2
#define VFIO_TYPE1v2_IOMMU		3
/*
 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping).  This
 * capability is subject to change as groups are added or removed.
 */
#define VFIO_DMA_CC_IOMMU		4

/* Check if EEH is supported */
#define VFIO_EEH			5

/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU	6	/* Implies v2 */

#define VFIO_SPAPR_TCE_v2_IOMMU		7

/*
 * The No-IOMMU IOMMU offers no translation or isolation for devices and
 * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Use of VFIO's No-IOMMU
 * code will taint the host kernel and should be used with extreme caution.
 */
#define VFIO_NOIOMMU_IOMMU		8

/*
 * The IOCTL interface is designed for extensibility by embedding the
 * structure length (argsz) and flags into structures passed between
 * kernel and userspace.  We therefore use the _IO() macro for these
 * defines to avoid implicitly embedding a size into the ioctl request.
 * As structure fields are added, argsz will increase to match and flag
 * bits will be defined to indicate additional fields with valid data.
 * It's *always* the caller's responsibility to indicate the size of
 * the structure passed by setting argsz appropriately.
 */
#define VFIO_TYPE	(';')
#define VFIO_BASE	100
/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */

/**
 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
 *
 * Report the version of the VFIO API.  This allows us to bump the entire
 * API version should we later need to add or change features in incompatible
 * ways.
 * Return: VFIO_API_VERSION
 * Availability: Always
 */
#define VFIO_GET_API_VERSION	_IO(VFIO_TYPE, VFIO_BASE + 0)

/**
 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 *
 * Check whether an extension is supported.
 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 * Availability: Always
 */
#define VFIO_CHECK_EXTENSION	_IO(VFIO_TYPE, VFIO_BASE + 1)

/**
 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 *
 * Set the iommu to the given type.  The type must be supported by an
 * iommu driver as verified by calling CHECK_EXTENSION using the same
 * type.  A group must be set to this file descriptor before this
 * ioctl is available.  The IOMMU interfaces enabled by this call are
 * specific to the value set.
 * Return: 0 on success, -errno on failure
 * Availability: When VFIO group attached
 */
#define VFIO_SET_IOMMU		_IO(VFIO_TYPE, VFIO_BASE + 2)
/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */

/**
 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 *						struct vfio_group_status)
 *
 * Retrieve information about the group.  Fills in provided
 * struct vfio_group_status.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
struct vfio_group_status {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;	/* Out: VFIO_GROUP_FLAGS_* below */
#define VFIO_GROUP_FLAGS_VIABLE		(1 << 0)
#define VFIO_GROUP_FLAGS_CONTAINER_SET	(1 << 1)
};
#define VFIO_GROUP_GET_STATUS	_IO(VFIO_TYPE, VFIO_BASE + 3)
/**
 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 *
 * Set the container for the VFIO group to the open VFIO file
 * descriptor provided.  Groups may only belong to a single
 * container.  Containers may, at their discretion, support multiple
 * groups.  Only when a container is set are all of the interfaces
 * of the VFIO file descriptor and the VFIO group file descriptor
 * available to the user.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
#define VFIO_GROUP_SET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 4)

/**
 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 *
 * Remove the group from the attached container.  This is the
 * opposite of the SET_CONTAINER call and returns the group to
 * an initial state.  All device file descriptors must be released
 * prior to calling this interface.  When removing the last group
 * from a container, the IOMMU will be disabled and all state lost,
 * effectively also returning the VFIO file descriptor to an initial
 * state.
 * Return: 0 on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_UNSET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 5)

/**
 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 *
 * Return a new file descriptor for the device object described by
 * the provided string.  The string should match a device listed in
 * the devices subdirectory of the IOMMU group sysfs entry.  The
 * group containing the device must already be added to this context.
 * Return: new file descriptor on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_GET_DEVICE_FD	_IO(VFIO_TYPE, VFIO_BASE + 6)
/* --------------- IOCTLs for DEVICE file descriptors --------------- */

/**
 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 *						struct vfio_device_info)
 *
 * Retrieve information about the device.  Fills in provided
 * struct vfio_device_info.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_info {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;	/* Out: VFIO_DEVICE_FLAGS_* below */
#define VFIO_DEVICE_FLAGS_RESET	(1 << 0)	/* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI	(1 << 1)	/* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)	/* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA  (1 << 3)	/* vfio-amba device */
	__u32	num_regions;	/* Max region index + 1 */
	__u32	num_irqs;	/* Max IRQ index + 1 */
};
#define VFIO_DEVICE_GET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 7)
/**
 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 *				       struct vfio_region_info)
 *
 * Retrieve information about a device region.  Caller provides
 * struct vfio_region_info with index value set.  Caller sets argsz.
 * Implementation of region mapping is bus driver specific.  This is
 * intended to describe MMIO, I/O port, as well as bus specific
 * regions (ex. PCI config space).  Zero sized regions may be used
 * to describe unimplemented regions (ex. unimplemented PCI BARs).
 * Return: 0 on success, -errno on failure.
 */
struct vfio_region_info {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;	/* Out: VFIO_REGION_INFO_FLAG_* below */
#define VFIO_REGION_INFO_FLAG_READ	(1 << 0) /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE	(1 << 1) /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP	(1 << 2) /* Region supports mmap */
	__u32	index;		/* In: Region index */
	__u32	resv;		/* Reserved for alignment */
	__u64	size;		/* Region size (bytes) */
	__u64	offset;		/* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO	_IO(VFIO_TYPE, VFIO_BASE + 8)
/**
 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 *				    struct vfio_irq_info)
 *
 * Retrieve information about a device IRQ.  Caller provides
 * struct vfio_irq_info with index value set.  Caller sets argsz.
 * Implementation of IRQ mapping is bus driver specific.  Indexes
 * using multiple IRQs are primarily intended to support MSI-like
 * interrupt blocks.  Zero count irq blocks may be used to describe
 * unimplemented interrupt types.
 *
 * The EVENTFD flag indicates the interrupt index supports eventfd based
 * signaling.
 *
 * The MASKABLE flags indicates the index supports MASK and UNMASK
 * actions described below.
 *
 * AUTOMASKED indicates that after signaling, the interrupt line is
 * automatically masked by VFIO and the user needs to unmask the line
 * to receive new interrupts.  This is primarily intended to distinguish
 * level triggered interrupts.
 *
 * The NORESIZE flag indicates that the interrupt lines within the index
 * are setup as a set and new subindexes cannot be enabled without first
 * disabling the entire index.  This is used for interrupts like PCI MSI
 * and MSI-X where the driver may only use a subset of the available
 * indexes, but VFIO needs to enable a specific number of vectors
 * upfront.  In the case of MSI-X, where the user can enable MSI-X and
 * then add and unmask vectors, it's up to userspace to make the decision
 * whether to allocate the maximum supported number of vectors or tear
 * down setup and incrementally increase the vectors as each is enabled.
 */
struct vfio_irq_info {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;	/* Out: VFIO_IRQ_INFO_* below */
#define VFIO_IRQ_INFO_EVENTFD		(1 << 0)
#define VFIO_IRQ_INFO_MASKABLE		(1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED	(1 << 2)
#define VFIO_IRQ_INFO_NORESIZE		(1 << 3)
	__u32	index;		/* In: IRQ index */
	__u32	count;		/* Out: Number of IRQs within this index */
};
#define VFIO_DEVICE_GET_IRQ_INFO	_IO(VFIO_TYPE, VFIO_BASE + 9)
/**
 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 *
 * Set signaling, masking, and unmasking of interrupts.  Caller provides
 * struct vfio_irq_set with all fields set.  'start' and 'count' indicate
 * the range of subindexes being specified.
 *
 * The DATA flags specify the type of data provided.  If DATA_NONE, the
 * operation performs the specified action immediately on the specified
 * interrupt(s).  For example, to unmask AUTOMASKED interrupt [0,0]:
 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 *
 * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 * data = {1,0,1}
 *
 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 * A value of -1 can be used to either de-assign interrupts if already
 * assigned or skip un-assigned interrupts.  For example, to set an eventfd
 * to be trigger for interrupts [0,0] and [0,2]:
 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 * data = {fd1, -1, fd2}
 * If index [0,1] is previously set, two count = 1 ioctls calls would be
 * required to set [0,0] and [0,2] without changing [0,1].
 *
 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 * from userspace (ie. simulate hardware triggering).
 *
 * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
 * enables the interrupt index for the device.  Individual subindex interrupts
 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 *
 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 * ACTION_TRIGGER specifies kernel->user signaling.
 */
struct vfio_irq_set {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;	/* One DATA flag plus one ACTION flag, below */
#define VFIO_IRQ_SET_DATA_NONE		(1 << 0) /* Data not present */
#define VFIO_IRQ_SET_DATA_BOOL		(1 << 1) /* Data is bool (u8) */
#define VFIO_IRQ_SET_DATA_EVENTFD	(1 << 2) /* Data is eventfd (s32) */
#define VFIO_IRQ_SET_ACTION_MASK	(1 << 3) /* Mask interrupt */
#define VFIO_IRQ_SET_ACTION_UNMASK	(1 << 4) /* Unmask interrupt */
#define VFIO_IRQ_SET_ACTION_TRIGGER	(1 << 5) /* Trigger interrupt */
	__u32	index;	/* IRQ index */
	__u32	start;	/* First subindex within the index */
	__u32	count;	/* Number of subindexes, starting at 'start' */
	__u8	data[];	/* Flexible array; layout per DATA_* flag */
};
#define VFIO_DEVICE_SET_IRQS	_IO(VFIO_TYPE, VFIO_BASE + 10)

/* Convenience masks covering all DATA and all ACTION flag bits */
#define VFIO_IRQ_SET_DATA_TYPE_MASK	(VFIO_IRQ_SET_DATA_NONE | \
					 VFIO_IRQ_SET_DATA_BOOL | \
					 VFIO_IRQ_SET_DATA_EVENTFD)
#define VFIO_IRQ_SET_ACTION_TYPE_MASK	(VFIO_IRQ_SET_ACTION_MASK | \
					 VFIO_IRQ_SET_ACTION_UNMASK | \
					 VFIO_IRQ_SET_ACTION_TRIGGER)
/**
 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 *
 * Reset a device.
 */
#define VFIO_DEVICE_RESET	_IO(VFIO_TYPE, VFIO_BASE + 11)
/*
 * The VFIO-PCI bus driver makes use of the following fixed region and
 * IRQ index mapping.  Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

/* Fixed region indexes for vfio-pci devices */
enum {
	VFIO_PCI_BAR0_REGION_INDEX,
	VFIO_PCI_BAR1_REGION_INDEX,
	VFIO_PCI_BAR2_REGION_INDEX,
	VFIO_PCI_BAR3_REGION_INDEX,
	VFIO_PCI_BAR4_REGION_INDEX,
	VFIO_PCI_BAR5_REGION_INDEX,
	VFIO_PCI_ROM_REGION_INDEX,
	VFIO_PCI_CONFIG_REGION_INDEX,
	/*
	 * Expose VGA regions defined for PCI base class 03, subclass 00.
	 * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
	 * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
	 * range is found at its identity mapped offset from the region
	 * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
	 * between described ranges are unimplemented.
	 */
	VFIO_PCI_VGA_REGION_INDEX,
	VFIO_PCI_NUM_REGIONS
};

/* Fixed IRQ indexes for vfio-pci devices */
enum {
	VFIO_PCI_INTX_IRQ_INDEX,
	VFIO_PCI_MSI_IRQ_INDEX,
	VFIO_PCI_MSIX_IRQ_INDEX,
	VFIO_PCI_ERR_IRQ_INDEX,
	VFIO_PCI_REQ_IRQ_INDEX,
	VFIO_PCI_NUM_IRQS
};
/**
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 *					      struct vfio_pci_hot_reset_info)
 *
 * Return: 0 on success, -errno on failure:
 *	-enospc = insufficient buffer, -enodev = unsupported for device.
 */
struct vfio_pci_dependent_device {
	__u32	group_id;	/* IOMMU group of the dependent device */
	__u16	segment;	/* PCI domain/segment number */
	__u8	bus;		/* PCI bus number */
	__u8	devfn;		/* Use PCI_SLOT/PCI_FUNC */
};

struct vfio_pci_hot_reset_info {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;
	__u32	count;	/* Out: number of entries in devices[] */
	struct vfio_pci_dependent_device	devices[];
};
#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)
/**
 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *				    struct vfio_pci_hot_reset)
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_pci_hot_reset {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;
	__u32	count;	/* Number of entries in group_fds[] */
	__s32	group_fds[];	/* Group fds covering all affected devices */
};
#define VFIO_DEVICE_PCI_HOT_RESET	_IO(VFIO_TYPE, VFIO_BASE + 13)
/* -------- API for Type1 VFIO IOMMU -------- */

/*
 * NOTE(review): ioctl numbers from VFIO_BASE + 12 up are reused between the
 * device fd (PCI hot reset above) and the container fd (Type1/SPAPR IOMMU
 * ioctls below); they do not clash because they target different fds.
 */

/**
 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_info)
 *
 * Retrieve information about the IOMMU object.  Fills in provided
 * struct vfio_iommu_info.  Caller sets argsz.
 *
 * XXX Should we do these by CHECK_EXTENSION too?
 */
struct vfio_iommu_type1_info {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;	/* Out: VFIO_IOMMU_INFO_* below */
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0)	/* supported page sizes info */
	__u64	iova_pgsizes;	/* Bitmap of supported page sizes */
};
#define VFIO_IOMMU_GET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)
/**
 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_dma_map)
 *
 * Map process virtual addresses to IO virtual addresses using the
 * provided struct vfio_dma_map.  Caller sets argsz.  READ &/ WRITE required.
 */
struct vfio_iommu_type1_dma_map {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;	/* At least one of the flags below is required */
#define VFIO_DMA_MAP_FLAG_READ (1 << 0)		/* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)	/* writable from device */
	__u64	vaddr;	/* Process virtual address */
	__u64	iova;	/* IO virtual address */
	__u64	size;	/* Size of mapping (bytes) */
};
#define VFIO_IOMMU_MAP_DMA	_IO(VFIO_TYPE, VFIO_BASE + 13)
/**
 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
 *				struct vfio_dma_unmap)
 *
 * Unmap IO virtual addresses using the provided struct vfio_dma_unmap.
 * Caller sets argsz.  The actual unmapped size is returned in the size
 * field.  No guarantee is made to the user that arbitrary unmaps of iova
 * or size different from those used in the original mapping call will
 * succeed.
 */
struct vfio_iommu_type1_dma_unmap {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;
	__u64	iova;	/* IO virtual address */
	__u64	size;	/* In: size to unmap; Out: size actually unmapped */
};
#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)

/*
 * IOCTLs to enable/disable IOMMU container usage.
 * No parameters are supported.
 */
#define VFIO_IOMMU_ENABLE	_IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE	_IO(VFIO_TYPE, VFIO_BASE + 16)
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */

/*
 * The SPAPR TCE DDW info struct provides the information about
 * the details of Dynamic DMA window capability.
 *
 * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
 * @max_dynamic_windows_supported tells the maximum number of windows
 * which the platform can create.
 * @levels tells the maximum number of levels in multi-level IOMMU tables;
 * this allows splitting a table into smaller chunks which reduces
 * the amount of physically contiguous memory required for the table.
 */
struct vfio_iommu_spapr_tce_ddw_info {
	__u64	pgsizes;	/* Bitmap of supported page sizes */
	__u32	max_dynamic_windows_supported;
	__u32	levels;
};
/*
 * The SPAPR TCE info struct provides the information about the PCI bus
 * address ranges available for DMA, these values are programmed into
 * the hardware so the guest has to know that information.
 *
 * The DMA 32 bit window start is an absolute PCI bus address.
 * The IOVA address passed via map/unmap ioctls are absolute PCI bus
 * addresses too so the window works as a filter rather than an offset
 * for IOVA addresses.
 *
 * Flags supported:
 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
 *   (DDW) support is present.  @ddw is only supported when DDW is present.
 */
struct vfio_iommu_spapr_tce_info {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;	/* Out: VFIO_IOMMU_SPAPR_INFO_* below */
#define VFIO_IOMMU_SPAPR_INFO_DDW	(1 << 0)	/* DDW supported */
	__u32	dma32_window_start;	/* 32 bit window start (bytes) */
	__u32	dma32_window_size;	/* 32 bit window size (bytes) */
	struct vfio_iommu_spapr_tce_ddw_info ddw;
};
#define VFIO_IOMMU_SPAPR_TCE_GET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)
/*
 * EEH PE operation struct provides ways to:
 * - enable/disable EEH functionality;
 * - unfreeze IO/DMA for frozen PE;
 * - read PE state;
 * - reset PE;
 * - configure PE;
 * - inject EEH error.
 */
struct vfio_eeh_pe_err {
	__u32	type;	/* Error type to inject */
	__u32	func;	/* Error function */
	__u64	addr;	/* Address the error applies to */
	__u64	mask;	/* Address mask */
};

struct vfio_eeh_pe_op {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;
	__u32	op;	/* One of the VFIO_EEH_PE_* operations below */
	union {
		struct vfio_eeh_pe_err	err;	/* For VFIO_EEH_PE_INJECT_ERR */
	};
};

/*
 * The constants below form three namespaces: op codes (passed in @op),
 * PE states (returned by GET_STATE), and reset types.  Numeric values
 * may repeat across namespaces (e.g. 5 is both STATE_UNAVAIL and
 * RESET_DEACTIVATE); they are never compared against each other.
 */
#define VFIO_EEH_PE_DISABLE		0	/* Disable EEH functionality */
#define VFIO_EEH_PE_ENABLE		1	/* Enable EEH functionality  */
#define VFIO_EEH_PE_UNFREEZE_IO		2	/* Enable IO for frozen PE   */
#define VFIO_EEH_PE_UNFREEZE_DMA	3	/* Enable DMA for frozen PE  */
#define VFIO_EEH_PE_GET_STATE		4	/* PE state retrieval        */
#define  VFIO_EEH_PE_STATE_NORMAL	0	/* PE in functional state    */
#define  VFIO_EEH_PE_STATE_RESET	1	/* PE reset in progress      */
#define  VFIO_EEH_PE_STATE_STOPPED	2	/* Stopped DMA and IO        */
#define  VFIO_EEH_PE_STATE_STOPPED_DMA	4	/* Stopped DMA only          */
#define  VFIO_EEH_PE_STATE_UNAVAIL	5	/* State unavailable         */
#define VFIO_EEH_PE_RESET_DEACTIVATE	5	/* Deassert PE reset         */
#define VFIO_EEH_PE_RESET_HOT		6	/* Assert hot reset          */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL	7	/* Assert fundamental reset  */
#define VFIO_EEH_PE_CONFIGURE		8	/* PE configuration          */
#define VFIO_EEH_PE_INJECT_ERR		9	/* Inject EEH error          */

#define VFIO_EEH_PE_OP			_IO(VFIO_TYPE, VFIO_BASE + 21)
/**
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
 *
 * Registers user space memory where DMA is allowed.  It pins
 * user pages and does the locked memory accounting so
 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
 * get faster.
 */
struct vfio_iommu_spapr_register_memory {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;
	__u64	vaddr;	/* Process virtual address */
	__u64	size;	/* Size of mapping (bytes) */
};
#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 17)
/**
 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
 *
 * Unregisters user space memory registered with
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
 * Uses vfio_iommu_spapr_register_memory for parameters.
 */
#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 18)
/**
 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
 *
 * Creates an additional TCE table and programs it (sets a new DMA window)
 * to every IOMMU group in the container.  It receives page shift, window
 * size and number of levels in the TCE table being created.
 *
 * It allocates and returns an offset on a PCI bus of the new DMA window.
 */
struct vfio_iommu_spapr_tce_create {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;
	/* in */
	__u32	page_shift;	/* log2 of the TCE page size */
	__u64	window_size;	/* Requested DMA window size (bytes) */
	__u32	levels;		/* Number of levels in the TCE table */
	/* out */
	__u64	start_addr;	/* PCI bus offset of the new window */
};
#define VFIO_IOMMU_SPAPR_TCE_CREATE	_IO(VFIO_TYPE, VFIO_BASE + 19)
/**
 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
 *
 * Unprograms a TCE table from all groups in the container and destroys it.
 * It receives a PCI bus offset as a window id.
 */
struct vfio_iommu_spapr_tce_remove {
	__u32	argsz;	/* Structure length, set by caller */
	__u32	flags;
	/* in */
	__u64	start_addr;	/* PCI bus offset identifying the window */
};
#define VFIO_IOMMU_SPAPR_TCE_REMOVE	_IO(VFIO_TYPE, VFIO_BASE + 20)

/* ***************************************************************** */

#endif /* _UAPIVFIO_H */