/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <trace/events/iommu.h>

#define IOMMU_READ   (1 << 0)
#define IOMMU_WRITE  (1 << 1)
#define IOMMU_CACHE  (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO   (1 << 4) /* e.g. things like MSI doorbells */

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;

/* iommu fault flags */
#define IOMMU_FAULT_READ  0x0
#define IOMMU_FAULT_WRITE 0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
                        struct device *, unsigned long, int, void *);
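
/*
 * Usage sketch (illustrative only, not part of the upstream header): an
 * IOMMU-API user may install a per-domain fault handler through
 * iommu_set_fault_handler(), declared further down. The handler receives
 * the faulting domain, device, iova and IOMMU_FAULT_* flags, plus the
 * opaque token passed at registration time; returning -ENOSYS keeps the
 * IOMMU driver's default behaviour. "my_fault_handler" is a hypothetical
 * name.
 *
 *      static int my_fault_handler(struct iommu_domain *domain,
 *                                  struct device *dev, unsigned long iova,
 *                                  int flags, void *token)
 *      {
 *              dev_err(dev, "iommu %s fault at iova 0x%lx\n",
 *                      (flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
 *              return -ENOSYS;
 *      }
 *
 *      iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */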

struct iommu_domain_geometry {
        dma_addr_t aperture_start; /* First address that can be mapped */
        dma_addr_t aperture_end;   /* Last address that can be mapped */
        bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING  (1U << 0) /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
                                            implementation */
#define __IOMMU_DOMAIN_PT      (1U << 2) /* Domain is identity mapped */

/*
 * These are the possible domain types:
 *
 *      IOMMU_DOMAIN_BLOCKED   - All DMA is blocked, can be used to isolate
 *                               devices
 *      IOMMU_DOMAIN_IDENTITY  - DMA addresses are system physical addresses
 *      IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
 *                               for VMs
 *      IOMMU_DOMAIN_DMA       - Internally used for DMA-API implementations.
 *                               This flag allows IOMMU drivers to implement
 *                               certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED   (0U)
#define IOMMU_DOMAIN_IDENTITY  (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA       (__IOMMU_DOMAIN_PAGING | \
                                __IOMMU_DOMAIN_DMA_API)
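
/*
 * Usage sketch (illustrative only, not part of the upstream header): the
 * typical life cycle of an IOMMU_DOMAIN_UNMANAGED domain, driven by an
 * IOMMU-API user (for example a VM manager). All functions used below are
 * declared later in this header; error handling is omitted for brevity.
 *
 *      struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *      iommu_attach_device(domain, dev);
 *      iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 *      ... DMA through the mapping ...
 *
 *      iommu_unmap(domain, iova, SZ_4K);
 *      iommu_detach_device(domain, dev);
 *      iommu_domain_free(domain);
 */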

struct iommu_domain {
        unsigned type;
        const struct iommu_ops *ops;
        unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
        iommu_fault_handler_t handler;
        void *handler_token;
        struct iommu_domain_geometry geometry;
        void *iova_cookie;
};

enum iommu_cap {
        IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
                                      transactions */
        IOMMU_CAP_INTR_REMAP,      /* IOMMU supports interrupt isolation */
        IOMMU_CAP_NOEXEC,          /* IOMMU_NOEXEC flag */
};

/*
 * The following constraints are specific to FSL_PAMUV1:
 *  -aperture must be a power of 2, and naturally aligned
 *  -number of windows must be a power of 2, and the address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be a power
 *   of 2 starting with 4KB and the physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above-mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */
enum iommu_attr {
        DOMAIN_ATTR_GEOMETRY,
        DOMAIN_ATTR_PAGING,
        DOMAIN_ATTR_WINDOWS,
        DOMAIN_ATTR_FSL_PAMU_STASH,
        DOMAIN_ATTR_FSL_PAMU_ENABLE,
        DOMAIN_ATTR_FSL_PAMUV1,
        DOMAIN_ATTR_NESTING, /* two stages of translation */
        DOMAIN_ATTR_MAX,
};
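
/*
 * Usage sketch (illustrative only): querying the aperture of a domain via
 * DOMAIN_ATTR_GEOMETRY and iommu_domain_get_attr(), declared below.
 *
 *      struct iommu_domain_geometry geo;
 *
 *      if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo) &&
 *          geo.force_aperture)
 *              pr_info("mappable range: %pad..%pad\n",
 *                      &geo.aperture_start, &geo.aperture_end);
 */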

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 */
struct iommu_resv_region {
        struct list_head list;
        phys_addr_t start;
        size_t length;
        int prot;
};
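
/*
 * Usage sketch (illustrative only): walking a device's reserved regions
 * with iommu_get_resv_regions()/iommu_put_resv_regions(), declared below,
 * so the corresponding IOVA ranges can be excluded from an allocator.
 * reserve_iova_range() is a hypothetical helper.
 *
 *      struct iommu_resv_region *region;
 *      LIST_HEAD(resv_regions);
 *
 *      iommu_get_resv_regions(dev, &resv_regions);
 *      list_for_each_entry(region, &resv_regions, list)
 *              reserve_iova_range(region->start, region->length, region->prot);
 *      iommu_put_resv_regions(dev, &resv_regions);
 */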

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @map_sg: map a scatter-gather list of physically contiguous memory chunks
 *          to an iommu domain
 * @iova_to_phys: translate iova to physical address
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @domain_set_windows: Set the number of windows for a domain
 * @domain_get_windows: Return the number of windows for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 */
struct iommu_ops {
        bool (*capable)(enum iommu_cap);

        /* Domain allocation and freeing by the iommu driver */
        struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
        void (*domain_free)(struct iommu_domain *);

        int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
        void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
        int (*map)(struct iommu_domain *domain, unsigned long iova,
                   phys_addr_t paddr, size_t size, int prot);
        size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
                        size_t size);
        size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
                         struct scatterlist *sg, unsigned int nents, int prot);
        phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
        int (*add_device)(struct device *dev);
        void (*remove_device)(struct device *dev);
        struct iommu_group *(*device_group)(struct device *dev);
        int (*domain_get_attr)(struct iommu_domain *domain,
                               enum iommu_attr attr, void *data);
        int (*domain_set_attr)(struct iommu_domain *domain,
                               enum iommu_attr attr, void *data);

        /* Request/Free a list of reserved regions for a device */
        void (*get_resv_regions)(struct device *dev, struct list_head *list);
        void (*put_resv_regions)(struct device *dev, struct list_head *list);
        void (*apply_resv_region)(struct device *dev,
                                  struct iommu_domain *domain,
                                  struct iommu_resv_region *region);

        /* Window handling functions */
        int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
                                    phys_addr_t paddr, u64 size, int prot);
        void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
        /* Set the number of windows per domain */
        int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
        /* Get the number of windows per domain */
        u32 (*domain_get_windows)(struct iommu_domain *domain);

        int (*of_xlate)(struct device *dev, struct of_phandle_args *args);

        unsigned long pgsize_bitmap;
};
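
/*
 * Driver-side sketch (illustrative only): an IOMMU driver fills in a
 * struct iommu_ops and registers it for the buses it handles with
 * bus_set_iommu(), declared below. The my_iommu_* callbacks are
 * hypothetical; default_iommu_map_sg() and generic_device_group() are the
 * generic helpers declared in this header.
 *
 *      static const struct iommu_ops my_iommu_ops = {
 *              .capable        = my_iommu_capable,
 *              .domain_alloc   = my_iommu_domain_alloc,
 *              .domain_free    = my_iommu_domain_free,
 *              .attach_dev     = my_iommu_attach_dev,
 *              .detach_dev     = my_iommu_detach_dev,
 *              .map            = my_iommu_map,
 *              .unmap          = my_iommu_unmap,
 *              .map_sg         = default_iommu_map_sg,
 *              .iova_to_phys   = my_iommu_iova_to_phys,
 *              .add_device     = my_iommu_add_device,
 *              .remove_device  = my_iommu_remove_device,
 *              .device_group   = generic_device_group,
 *              .pgsize_bitmap  = SZ_4K | SZ_2M | SZ_1G,
 *      };
 *
 *      bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 */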

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE     1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE     2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER    3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER   4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER  5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
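
/*
 * Usage sketch (illustrative only): subscribing to group events with a
 * regular notifier_block and iommu_group_register_notifier(), declared
 * below. The action argument is one of the IOMMU_GROUP_NOTIFY_* values
 * above and the data argument is the struct device concerned;
 * "my_group_notifier" is a hypothetical name.
 *
 *      static int my_group_notifier(struct notifier_block *nb,
 *                                   unsigned long action, void *data)
 *      {
 *              struct device *dev = data;
 *
 *              if (action == IOMMU_GROUP_NOTIFY_BOUND_DRIVER)
 *                      dev_info(dev, "driver bound within iommu group\n");
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_group_notifier,
 *      };
 *
 *      iommu_group_register_notifier(group, &my_nb);
 */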

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
                               struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
                                struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                          size_t size);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                                   struct scatterlist *sg, unsigned int nents,
                                   int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
                                    iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);

extern int iommu_attach_group(struct iommu_domain *domain,
                              struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
                               struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
                                      void *iommu_data,
                                      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
                                  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
                                         struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
                                           struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
                                 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
                                 void *data);

struct device *iommu_device_create(struct device *parent, void *drvdata,
                                   const struct attribute_group **groups,
                                   const char *fmt, ...) __printf(4, 5);
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
                                      phys_addr_t offset, u64 size,
                                      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
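
/*
 * Usage sketch (illustrative only, window-based IOMMUs such as FSL PAMU):
 * the number of windows is configured through DOMAIN_ATTR_WINDOWS before
 * the individual windows are programmed with the calls above. Sizes and
 * window numbers here are arbitrary.
 *
 *      u32 windows = 4;
 *
 *      iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &windows);
 *      iommu_domain_window_enable(domain, 0, paddr, SZ_64M,
 *                                 IOMMU_READ | IOMMU_WRITE);
 *
 *      iommu_domain_window_disable(domain, 0);
 */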

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
static inline int report_iommu_fault(struct iommu_domain *domain,
                struct device *dev, unsigned long iova, int flags)
{
        int ret = -ENOSYS;

        /*
         * if upper layers showed interest and installed a fault handler,
         * invoke it.
         */
        if (domain->handler)
                ret = domain->handler(domain, dev, iova, flags,
                                      domain->handler_token);

        trace_io_page_fault(dev, iova, flags);

        return ret;
}
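
/*
 * Driver-side sketch (illustrative only): a low-level IOMMU driver reports
 * translation faults from its interrupt handler. "struct my_iommu",
 * read_fault_address() and fault_was_write() are hypothetical; the actual
 * decoding is hardware specific.
 *
 *      static irqreturn_t my_iommu_fault_irq(int irq, void *dev_id)
 *      {
 *              struct my_iommu *iommu = dev_id;
 *              unsigned long iova = read_fault_address(iommu);
 *              int flags = fault_was_write(iommu) ? IOMMU_FAULT_WRITE
 *                                                 : IOMMU_FAULT_READ;
 *
 *              if (report_iommu_fault(iommu->domain, iommu->dev, iova, flags))
 *                      dev_err_ratelimited(iommu->dev,
 *                                          "unhandled fault at 0x%lx\n", iova);
 *              return IRQ_HANDLED;
 *      }
 */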

static inline size_t iommu_map_sg(struct iommu_domain *domain,
                                  unsigned long iova, struct scatterlist *sg,
                                  unsigned int nents, int prot)
{
        return domain->ops->map_sg(domain, iova, sg, nents, prot);
}
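
/*
 * Usage sketch (illustrative only): mapping a short scatterlist in one go.
 * iommu_map_sg() returns the number of bytes that were actually mapped,
 * which may be less than the total length of the list. "page0" and "page1"
 * stand in for struct page pointers owned by the caller.
 *
 *      struct scatterlist sgl[2];
 *      size_t mapped;
 *
 *      sg_init_table(sgl, 2);
 *      sg_set_page(&sgl[0], page0, PAGE_SIZE, 0);
 *      sg_set_page(&sgl[1], page1, PAGE_SIZE, 0);
 *
 *      mapped = iommu_map_sg(domain, iova, sgl, 2,
 *                            IOMMU_READ | IOMMU_WRITE);
 */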

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @iommu_priv: IOMMU driver private data for this device
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
        const struct iommu_ops *ops;
        struct fwnode_handle *iommu_fwnode;
        void *iommu_priv;
        unsigned int num_ids;
        u32 ids[1];
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
                      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
void iommu_register_instance(struct fwnode_handle *fwnode,
                             const struct iommu_ops *ops);
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
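
/*
 * Driver-side sketch (illustrative only): a driver's of_xlate() callback
 * usually just records the master IDs handed over by the firmware in the
 * per-device iommu_fwspec; iommu_fwspec_init() is normally invoked by the
 * firmware glue code before of_xlate() is called. "my_iommu_of_xlate" is a
 * hypothetical name.
 *
 *      static int my_iommu_of_xlate(struct device *dev,
 *                                   struct of_phandle_args *args)
 *      {
 *              return iommu_fwspec_add_ids(dev, args->args, args->args_count);
 *      }
 *
 * The recorded IDs are available to the driver through the device's
 * iommu_fwspec when the device is later added and attached.
 */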

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};

static inline bool iommu_present(struct bus_type *bus)
{
        return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
        return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
        return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
                                       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
        return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, size_t size, int prot)
{
        return -ENODEV;
}

static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t size)
{
        return -ENODEV;
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
                                  unsigned long iova, struct scatterlist *sg,
                                  unsigned int nents, int prot)
{
        return -ENODEV;
}

static inline int iommu_domain_window_enable(struct iommu_domain *domain,
                                             u32 wnd_nr, phys_addr_t paddr,
                                             u64 size, int prot)
{
        return -ENODEV;
}

static inline void iommu_domain_window_disable(struct iommu_domain *domain,
                                               u32 wnd_nr)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
        return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
                                           iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
                                          struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
                                          struct list_head *list)
{
}

static inline int iommu_request_dm_for_dev(struct device *dev)
{
        return -ENODEV;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
                                     struct iommu_group *group)
{
        return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
                                      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
        return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
                                             void *iommu_data,
                                             void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
                                       const char *name)
{
        return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
                                         struct device *dev)
{
        return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
                                           void *data,
                                           int (*fn)(struct device *, void *))
{
        return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
        return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
                                                struct notifier_block *nb)
{
        return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
                                                  struct notifier_block *nb)
{
        return 0;
}

static inline int iommu_group_id(struct iommu_group *group)
{
        return -ENODEV;
}

static inline int iommu_domain_get_attr(struct iommu_domain *domain,
                                        enum iommu_attr attr, void *data)
{
        return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain,
                                        enum iommu_attr attr, void *data)
{
        return -EINVAL;
}

static inline struct device *iommu_device_create(struct device *parent,
                                                 void *drvdata,
                                                 const struct attribute_group **groups,
                                                 const char *fmt, ...)
{
        return ERR_PTR(-ENODEV);
}

static inline void iommu_device_destroy(struct device *dev)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
        return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
                                    struct fwnode_handle *iommu_fwnode,
                                    const struct iommu_ops *ops)
{
        return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
                                       int num_ids)
{
        return -ENODEV;
}

static inline void iommu_register_instance(struct fwnode_handle *fwnode,
                                           const struct iommu_ops *ops)
{
}

static inline
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
{
        return NULL;
}

#endif /* CONFIG_IOMMU_API */

#endif /* __LINUX_IOMMU_H */