intel_irq_remapping.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. #define pr_fmt(fmt) "DMAR-IR: " fmt
  3. #include <linux/interrupt.h>
  4. #include <linux/dmar.h>
  5. #include <linux/spinlock.h>
  6. #include <linux/slab.h>
  7. #include <linux/jiffies.h>
  8. #include <linux/hpet.h>
  9. #include <linux/pci.h>
  10. #include <linux/irq.h>
  11. #include <linux/intel-iommu.h>
  12. #include <linux/acpi.h>
  13. #include <linux/irqdomain.h>
  14. #include <linux/crash_dump.h>
  15. #include <asm/io_apic.h>
  16. #include <asm/smp.h>
  17. #include <asm/cpu.h>
  18. #include <asm/irq_remapping.h>
  19. #include <asm/pci-direct.h>
  20. #include <asm/msidef.h>
  21. #include "irq_remapping.h"
  22. enum irq_mode {
  23. IRQ_REMAPPING,
  24. IRQ_POSTING,
  25. };
  26. struct ioapic_scope {
  27. struct intel_iommu *iommu;
  28. unsigned int id;
  29. unsigned int bus; /* PCI bus number */
  30. unsigned int devfn; /* PCI devfn number */
  31. };
  32. struct hpet_scope {
  33. struct intel_iommu *iommu;
  34. u8 id;
  35. unsigned int bus;
  36. unsigned int devfn;
  37. };
  38. struct irq_2_iommu {
  39. struct intel_iommu *iommu;
  40. u16 irte_index;
  41. u16 sub_handle;
  42. u8 irte_mask;
  43. enum irq_mode mode;
  44. };
  45. struct intel_ir_data {
  46. struct irq_2_iommu irq_2_iommu;
  47. struct irte irte_entry;
  48. union {
  49. struct msi_msg msi_entry;
  50. };
  51. };
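/*
 * IR_X2APIC_MODE() yields the EIME bit (bit 11) of the IRTA register when
 * x2apic mode is requested. IRTE_DEST() formats the destination APIC ID for
 * an IRTE: the full 32-bit ID in EIM (x2apic) mode, or the 8-bit xapic ID
 * shifted into bits 15:8 otherwise.
 */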
  52. #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
  53. #define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)
  54. static int __read_mostly eim_mode;
  55. static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
  56. static struct hpet_scope ir_hpet[MAX_HPET_TBS];
  57. /*
  58. * Lock ordering:
  59. * ->dmar_global_lock
  60. * ->irq_2_ir_lock
  61. * ->qi->q_lock
  62. * ->iommu->register_lock
  63. * Note:
  64. * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
65. * in a single-threaded environment with interrupts disabled, so there is no need to take
  66. * the dmar_global_lock.
  67. */
  68. DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
  69. static const struct irq_domain_ops intel_ir_domain_ops;
  70. static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
  71. static int __init parse_ioapics_under_ir(void);
  72. static bool ir_pre_enabled(struct intel_iommu *iommu)
  73. {
  74. return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
  75. }
  76. static void clear_ir_pre_enabled(struct intel_iommu *iommu)
  77. {
  78. iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
  79. }
  80. static void init_ir_status(struct intel_iommu *iommu)
  81. {
  82. u32 gsts;
  83. gsts = readl(iommu->reg + DMAR_GSTS_REG);
  84. if (gsts & DMA_GSTS_IRES)
  85. iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
  86. }
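/*
 * Allocate a block of 'count' IRTEs for an irq. The count is rounded up to a
 * power of two so the whole block can be covered by a single handle mask, and
 * the base index is recorded in @irq_iommu. Returns the base index, or -1 on
 * failure.
 */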
  87. static int alloc_irte(struct intel_iommu *iommu, int irq,
  88. struct irq_2_iommu *irq_iommu, u16 count)
  89. {
  90. struct ir_table *table = iommu->ir_table;
  91. unsigned int mask = 0;
  92. unsigned long flags;
  93. int index;
  94. if (!count || !irq_iommu)
  95. return -1;
  96. if (count > 1) {
  97. count = __roundup_pow_of_two(count);
  98. mask = ilog2(count);
  99. }
  100. if (mask > ecap_max_handle_mask(iommu->ecap)) {
  101. pr_err("Requested mask %x exceeds the max invalidation handle"
  102. " mask value %Lx\n", mask,
  103. ecap_max_handle_mask(iommu->ecap));
  104. return -1;
  105. }
  106. raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
  107. index = bitmap_find_free_region(table->bitmap,
  108. INTR_REMAP_TABLE_ENTRIES, mask);
  109. if (index < 0) {
  110. pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
  111. } else {
  112. irq_iommu->iommu = iommu;
  113. irq_iommu->irte_index = index;
  114. irq_iommu->sub_handle = 0;
  115. irq_iommu->irte_mask = mask;
  116. irq_iommu->mode = IRQ_REMAPPING;
  117. }
  118. raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
  119. return index;
  120. }
  121. static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
  122. {
  123. struct qi_desc desc;
  124. desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
  125. | QI_IEC_SELECTIVE;
  126. desc.high = 0;
  127. return qi_submit_sync(&desc, iommu);
  128. }
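/*
 * Write a new IRTE and invalidate the interrupt entry cache so the hardware
 * picks up the change. Entries in (or entering) posted format are updated
 * with cmpxchg16b because the posted-descriptor address straddles the 64-bit
 * boundary of the entry.
 */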
  129. static int modify_irte(struct irq_2_iommu *irq_iommu,
  130. struct irte *irte_modified)
  131. {
  132. struct intel_iommu *iommu;
  133. unsigned long flags;
  134. struct irte *irte;
  135. int rc, index;
  136. if (!irq_iommu)
  137. return -1;
  138. raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
  139. iommu = irq_iommu->iommu;
  140. index = irq_iommu->irte_index + irq_iommu->sub_handle;
  141. irte = &iommu->ir_table->base[index];
  142. #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
  143. if ((irte->pst == 1) || (irte_modified->pst == 1)) {
  144. bool ret;
  145. ret = cmpxchg_double(&irte->low, &irte->high,
  146. irte->low, irte->high,
  147. irte_modified->low, irte_modified->high);
  148. /*
  149. * We use cmpxchg16 to atomically update the 128-bit IRTE,
  150. * and it cannot be updated by the hardware or other processors
  151. * behind us, so the return value of cmpxchg16 should be the
  152. * same as the old value.
  153. */
  154. WARN_ON(!ret);
  155. } else
  156. #endif
  157. {
  158. set_64bit(&irte->low, irte_modified->low);
  159. set_64bit(&irte->high, irte_modified->high);
  160. }
  161. __iommu_flush_cache(iommu, irte, sizeof(*irte));
  162. rc = qi_flush_iec(iommu, index, 0);
  163. /* Update iommu mode according to the IRTE mode */
  164. irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
  165. raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
  166. return rc;
  167. }
  168. static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
  169. {
  170. int i;
  171. for (i = 0; i < MAX_HPET_TBS; i++)
  172. if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
  173. return ir_hpet[i].iommu;
  174. return NULL;
  175. }
  176. static struct intel_iommu *map_ioapic_to_ir(int apic)
  177. {
  178. int i;
  179. for (i = 0; i < MAX_IO_APICS; i++)
  180. if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
  181. return ir_ioapic[i].iommu;
  182. return NULL;
  183. }
  184. static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
  185. {
  186. struct dmar_drhd_unit *drhd;
  187. drhd = dmar_find_matched_drhd_unit(dev);
  188. if (!drhd)
  189. return NULL;
  190. return drhd->iommu;
  191. }
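/*
 * Zero and release the IRTE block backing this irq. Only the base entry
 * (sub_handle == 0) tears the block down; the caller holds irq_2_ir_lock.
 */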
  192. static int clear_entries(struct irq_2_iommu *irq_iommu)
  193. {
  194. struct irte *start, *entry, *end;
  195. struct intel_iommu *iommu;
  196. int index;
  197. if (irq_iommu->sub_handle)
  198. return 0;
  199. iommu = irq_iommu->iommu;
  200. index = irq_iommu->irte_index;
  201. start = iommu->ir_table->base + index;
  202. end = start + (1 << irq_iommu->irte_mask);
  203. for (entry = start; entry < end; entry++) {
  204. set_64bit(&entry->low, 0);
  205. set_64bit(&entry->high, 0);
  206. }
  207. bitmap_release_region(iommu->ir_table->bitmap, index,
  208. irq_iommu->irte_mask);
  209. return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
  210. }
  211. /*
  212. * source validation type
  213. */
  214. #define SVT_NO_VERIFY 0x0 /* no verification is required */
  215. #define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */
  216. #define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
  217. /*
  218. * source-id qualifier
  219. */
  220. #define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
  221. #define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
  222. * the third least significant bit
  223. */
  224. #define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
  225. * the second and third least significant bits
  226. */
  227. #define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
228. * the three least significant bits
  229. */
  230. /*
  231. * set SVT, SQ and SID fields of irte to verify
  232. * source ids of interrupt requests
  233. */
  234. static void set_irte_sid(struct irte *irte, unsigned int svt,
  235. unsigned int sq, unsigned int sid)
  236. {
  237. if (disable_sourceid_checking)
  238. svt = SVT_NO_VERIFY;
  239. irte->svt = svt;
  240. irte->sq = sq;
  241. irte->sid = sid;
  242. }
  243. static int set_ioapic_sid(struct irte *irte, int apic)
  244. {
  245. int i;
  246. u16 sid = 0;
  247. if (!irte)
  248. return -1;
  249. down_read(&dmar_global_lock);
  250. for (i = 0; i < MAX_IO_APICS; i++) {
  251. if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
  252. sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
  253. break;
  254. }
  255. }
  256. up_read(&dmar_global_lock);
  257. if (sid == 0) {
  258. pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
  259. return -1;
  260. }
  261. set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
  262. return 0;
  263. }
  264. static int set_hpet_sid(struct irte *irte, u8 id)
  265. {
  266. int i;
  267. u16 sid = 0;
  268. if (!irte)
  269. return -1;
  270. down_read(&dmar_global_lock);
  271. for (i = 0; i < MAX_HPET_TBS; i++) {
  272. if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
  273. sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
  274. break;
  275. }
  276. }
  277. up_read(&dmar_global_lock);
  278. if (sid == 0) {
  279. pr_warn("Failed to set source-id of HPET block (%d)\n", id);
  280. return -1;
  281. }
  282. /*
  283. * Should really use SQ_ALL_16. Some platforms are broken.
  284. * While we figure out the right quirks for these broken platforms, use
  285. * SQ_13_IGNORE_3 for now.
  286. */
  287. set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
  288. return 0;
  289. }
  290. struct set_msi_sid_data {
  291. struct pci_dev *pdev;
  292. u16 alias;
  293. };
  294. static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
  295. {
  296. struct set_msi_sid_data *data = opaque;
  297. data->pdev = pdev;
  298. data->alias = alias;
  299. return 0;
  300. }
  301. static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
  302. {
  303. struct set_msi_sid_data data;
  304. if (!irte || !dev)
  305. return -1;
  306. pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
  307. /*
  308. * DMA alias provides us with a PCI device and alias. The only case
309. * where it will return an alias on a different bus than the
  310. * device is the case of a PCIe-to-PCI bridge, where the alias is for
  311. * the subordinate bus. In this case we can only verify the bus.
  312. *
  313. * If the alias device is on a different bus than our source device
  314. * then we have a topology based alias, use it.
  315. *
  316. * Otherwise, the alias is for a device DMA quirk and we cannot
  317. * assume that MSI uses the same requester ID. Therefore use the
  318. * original device.
  319. */
  320. if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
  321. set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
  322. PCI_DEVID(PCI_BUS_NUM(data.alias),
  323. dev->bus->number));
  324. else if (data.pdev->bus->number != dev->bus->number)
  325. set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
  326. else
  327. set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
  328. PCI_DEVID(dev->bus->number, dev->devfn));
  329. return 0;
  330. }
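/*
 * Kdump support: copy the IRTE table programmed by the previous kernel so
 * in-flight interrupts keep a valid translation, and mark the entries that
 * are still present as allocated in our bitmap.
 */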
  331. static int iommu_load_old_irte(struct intel_iommu *iommu)
  332. {
  333. struct irte *old_ir_table;
  334. phys_addr_t irt_phys;
  335. unsigned int i;
  336. size_t size;
  337. u64 irta;
  338. /* Check whether the old ir-table has the same size as ours */
  339. irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
  340. if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
  341. != INTR_REMAP_TABLE_REG_SIZE)
  342. return -EINVAL;
  343. irt_phys = irta & VTD_PAGE_MASK;
  344. size = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);
  345. /* Map the old IR table */
  346. old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
  347. if (!old_ir_table)
  348. return -ENOMEM;
  349. /* Copy data over */
  350. memcpy(iommu->ir_table->base, old_ir_table, size);
  351. __iommu_flush_cache(iommu, iommu->ir_table->base, size);
  352. /*
  353. * Now check the table for used entries and mark those as
  354. * allocated in the bitmap
  355. */
  356. for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
  357. if (iommu->ir_table->base[i].present)
  358. bitmap_set(iommu->ir_table->bitmap, i, 1);
  359. }
  360. memunmap(old_ir_table);
  361. return 0;
  362. }
  363. static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
  364. {
  365. unsigned long flags;
  366. u64 addr;
  367. u32 sts;
  368. addr = virt_to_phys((void *)iommu->ir_table->base);
  369. raw_spin_lock_irqsave(&iommu->register_lock, flags);
  370. dmar_writeq(iommu->reg + DMAR_IRTA_REG,
  371. (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
  372. /* Set interrupt-remapping table pointer */
  373. writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
  374. IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  375. readl, (sts & DMA_GSTS_IRTPS), sts);
  376. raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
  377. /*
  378. * Global invalidation of interrupt entry cache to make sure the
  379. * hardware uses the new irq remapping table.
  380. */
  381. qi_global_iec(iommu);
  382. }
  383. static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
  384. {
  385. unsigned long flags;
  386. u32 sts;
  387. raw_spin_lock_irqsave(&iommu->register_lock, flags);
  388. /* Enable interrupt-remapping */
  389. iommu->gcmd |= DMA_GCMD_IRE;
  390. iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
  391. writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
  392. IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  393. readl, (sts & DMA_GSTS_IRES), sts);
  394. /*
  395. * With CFI clear in the Global Command register, we should be
  396. * protected from dangerous (i.e. compatibility) interrupts
  397. * regardless of x2apic status. Check just to be sure.
  398. */
  399. if (sts & DMA_GSTS_CFIS)
  400. WARN(1, KERN_WARNING
  401. "Compatibility-format IRQs enabled despite intr remapping;\n"
  402. "you are vulnerable to IRQ injection.\n");
  403. raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
  404. }
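/*
 * Allocate the per-IOMMU remapping table and allocation bitmap, create the
 * IR irq domain hierarchy and its MSI child domain, make sure queued
 * invalidation is running, and handle a table that was left enabled by a
 * previous kernel before programming the IRTA register.
 */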
  405. static int intel_setup_irq_remapping(struct intel_iommu *iommu)
  406. {
  407. struct ir_table *ir_table;
  408. struct fwnode_handle *fn;
  409. unsigned long *bitmap;
  410. struct page *pages;
  411. if (iommu->ir_table)
  412. return 0;
  413. ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
  414. if (!ir_table)
  415. return -ENOMEM;
  416. pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
  417. INTR_REMAP_PAGE_ORDER);
  418. if (!pages) {
  419. pr_err("IR%d: failed to allocate pages of order %d\n",
  420. iommu->seq_id, INTR_REMAP_PAGE_ORDER);
  421. goto out_free_table;
  422. }
  423. bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
  424. sizeof(long), GFP_ATOMIC);
  425. if (bitmap == NULL) {
  426. pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
  427. goto out_free_pages;
  428. }
  429. fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
  430. if (!fn)
  431. goto out_free_bitmap;
  432. iommu->ir_domain =
  433. irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
  434. 0, INTR_REMAP_TABLE_ENTRIES,
  435. fn, &intel_ir_domain_ops,
  436. iommu);
  437. irq_domain_free_fwnode(fn);
  438. if (!iommu->ir_domain) {
  439. pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
  440. goto out_free_bitmap;
  441. }
  442. iommu->ir_msi_domain =
  443. arch_create_remap_msi_irq_domain(iommu->ir_domain,
  444. "INTEL-IR-MSI",
  445. iommu->seq_id);
  446. ir_table->base = page_address(pages);
  447. ir_table->bitmap = bitmap;
  448. iommu->ir_table = ir_table;
  449. /*
  450. * If the queued invalidation is already initialized,
451. * we shouldn't disable it.
  452. */
  453. if (!iommu->qi) {
  454. /*
  455. * Clear previous faults.
  456. */
  457. dmar_fault(-1, iommu);
  458. dmar_disable_qi(iommu);
  459. if (dmar_enable_qi(iommu)) {
  460. pr_err("Failed to enable queued invalidation\n");
  461. goto out_free_bitmap;
  462. }
  463. }
  464. init_ir_status(iommu);
  465. if (ir_pre_enabled(iommu)) {
  466. if (!is_kdump_kernel()) {
  467. pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
  468. iommu->name);
  469. clear_ir_pre_enabled(iommu);
  470. iommu_disable_irq_remapping(iommu);
  471. } else if (iommu_load_old_irte(iommu))
  472. pr_err("Failed to copy IR table for %s from previous kernel\n",
  473. iommu->name);
  474. else
  475. pr_info("Copied IR table for %s from previous kernel\n",
  476. iommu->name);
  477. }
  478. iommu_set_irq_remapping(iommu, eim_mode);
  479. return 0;
  480. out_free_bitmap:
  481. kfree(bitmap);
  482. out_free_pages:
  483. __free_pages(pages, INTR_REMAP_PAGE_ORDER);
  484. out_free_table:
  485. kfree(ir_table);
  486. iommu->ir_table = NULL;
  487. return -ENOMEM;
  488. }
  489. static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
  490. {
  491. if (iommu && iommu->ir_table) {
  492. if (iommu->ir_msi_domain) {
  493. irq_domain_remove(iommu->ir_msi_domain);
  494. iommu->ir_msi_domain = NULL;
  495. }
  496. if (iommu->ir_domain) {
  497. irq_domain_remove(iommu->ir_domain);
  498. iommu->ir_domain = NULL;
  499. }
  500. free_pages((unsigned long)iommu->ir_table->base,
  501. INTR_REMAP_PAGE_ORDER);
  502. kfree(iommu->ir_table->bitmap);
  503. kfree(iommu->ir_table);
  504. iommu->ir_table = NULL;
  505. }
  506. }
  507. /*
  508. * Disable Interrupt Remapping.
  509. */
  510. static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
  511. {
  512. unsigned long flags;
  513. u32 sts;
  514. if (!ecap_ir_support(iommu->ecap))
  515. return;
  516. /*
  517. * global invalidation of interrupt entry cache before disabling
  518. * interrupt-remapping.
  519. */
  520. qi_global_iec(iommu);
  521. raw_spin_lock_irqsave(&iommu->register_lock, flags);
  522. sts = readl(iommu->reg + DMAR_GSTS_REG);
  523. if (!(sts & DMA_GSTS_IRES))
  524. goto end;
  525. iommu->gcmd &= ~DMA_GCMD_IRE;
  526. writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
  527. IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  528. readl, !(sts & DMA_GSTS_IRES), sts);
  529. end:
  530. raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
  531. }
  532. static int __init dmar_x2apic_optout(void)
  533. {
  534. struct acpi_table_dmar *dmar;
  535. dmar = (struct acpi_table_dmar *)dmar_tbl;
  536. if (!dmar || no_x2apic_optout)
  537. return 0;
  538. return dmar->flags & DMAR_X2APIC_OPT_OUT;
  539. }
  540. static void __init intel_cleanup_irq_remapping(void)
  541. {
  542. struct dmar_drhd_unit *drhd;
  543. struct intel_iommu *iommu;
  544. for_each_iommu(iommu, drhd) {
  545. if (ecap_ir_support(iommu->ecap)) {
  546. iommu_disable_irq_remapping(iommu);
  547. intel_teardown_irq_remapping(iommu);
  548. }
  549. }
  550. if (x2apic_supported())
  551. pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
  552. }
  553. static int __init intel_prepare_irq_remapping(void)
  554. {
  555. struct dmar_drhd_unit *drhd;
  556. struct intel_iommu *iommu;
  557. int eim = 0;
  558. if (irq_remap_broken) {
  559. pr_warn("This system BIOS has enabled interrupt remapping\n"
  560. "on a chipset that contains an erratum making that\n"
  561. "feature unstable. To maintain system stability\n"
  562. "interrupt remapping is being disabled. Please\n"
  563. "contact your BIOS vendor for an update\n");
  564. add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
  565. return -ENODEV;
  566. }
  567. if (dmar_table_init() < 0)
  568. return -ENODEV;
  569. if (!dmar_ir_support())
  570. return -ENODEV;
  571. if (parse_ioapics_under_ir()) {
  572. pr_info("Not enabling interrupt remapping\n");
  573. goto error;
  574. }
  575. /* First make sure all IOMMUs support IRQ remapping */
  576. for_each_iommu(iommu, drhd)
  577. if (!ecap_ir_support(iommu->ecap))
  578. goto error;
  579. /* Detect remapping mode: lapic or x2apic */
  580. if (x2apic_supported()) {
  581. eim = !dmar_x2apic_optout();
  582. if (!eim) {
  583. pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.");
  584. pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
  585. }
  586. }
  587. for_each_iommu(iommu, drhd) {
  588. if (eim && !ecap_eim_support(iommu->ecap)) {
  589. pr_info("%s does not support EIM\n", iommu->name);
  590. eim = 0;
  591. }
  592. }
  593. eim_mode = eim;
  594. if (eim)
  595. pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
  596. /* Do the initializations early */
  597. for_each_iommu(iommu, drhd) {
  598. if (intel_setup_irq_remapping(iommu)) {
  599. pr_err("Failed to setup irq remapping for %s\n",
  600. iommu->name);
  601. goto error;
  602. }
  603. }
  604. return 0;
  605. error:
  606. intel_cleanup_irq_remapping();
  607. return -ENODEV;
  608. }
  609. /*
  610. * Set Posted-Interrupts capability.
  611. */
  612. static inline void set_irq_posting_cap(void)
  613. {
  614. struct dmar_drhd_unit *drhd;
  615. struct intel_iommu *iommu;
  616. if (!disable_irq_post) {
  617. /*
  618. * If IRTE is in posted format, the 'pda' field goes across the
619. * 64-bit boundary, so we need to use cmpxchg16b to atomically update
  620. * it. We only expose posted-interrupt when X86_FEATURE_CX16
  621. * is supported. Actually, hardware platforms supporting PI
622. * should have X86_FEATURE_CX16 support; this has been confirmed
623. * with Intel's hardware engineers.
  624. */
  625. if (boot_cpu_has(X86_FEATURE_CX16))
  626. intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
  627. for_each_iommu(iommu, drhd)
  628. if (!cap_pi_support(iommu->cap)) {
  629. intel_irq_remap_ops.capability &=
  630. ~(1 << IRQ_POSTING_CAP);
  631. break;
  632. }
  633. }
  634. }
  635. static int __init intel_enable_irq_remapping(void)
  636. {
  637. struct dmar_drhd_unit *drhd;
  638. struct intel_iommu *iommu;
  639. bool setup = false;
  640. /*
  641. * Setup Interrupt-remapping for all the DRHD's now.
  642. */
  643. for_each_iommu(iommu, drhd) {
  644. if (!ir_pre_enabled(iommu))
  645. iommu_enable_irq_remapping(iommu);
  646. setup = true;
  647. }
  648. if (!setup)
  649. goto error;
  650. irq_remapping_enabled = 1;
  651. set_irq_posting_cap();
  652. pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");
  653. return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
  654. error:
  655. intel_cleanup_irq_remapping();
  656. return -1;
  657. }
  658. static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
  659. struct intel_iommu *iommu,
  660. struct acpi_dmar_hardware_unit *drhd)
  661. {
  662. struct acpi_dmar_pci_path *path;
  663. u8 bus;
  664. int count, free = -1;
  665. bus = scope->bus;
  666. path = (struct acpi_dmar_pci_path *)(scope + 1);
  667. count = (scope->length - sizeof(struct acpi_dmar_device_scope))
  668. / sizeof(struct acpi_dmar_pci_path);
  669. while (--count > 0) {
  670. /*
671. * Access PCI directly because the PCI
  672. * subsystem isn't initialized yet.
  673. */
  674. bus = read_pci_config_byte(bus, path->device, path->function,
  675. PCI_SECONDARY_BUS);
  676. path++;
  677. }
  678. for (count = 0; count < MAX_HPET_TBS; count++) {
  679. if (ir_hpet[count].iommu == iommu &&
  680. ir_hpet[count].id == scope->enumeration_id)
  681. return 0;
  682. else if (ir_hpet[count].iommu == NULL && free == -1)
  683. free = count;
  684. }
  685. if (free == -1) {
  686. pr_warn("Exceeded Max HPET blocks\n");
  687. return -ENOSPC;
  688. }
  689. ir_hpet[free].iommu = iommu;
  690. ir_hpet[free].id = scope->enumeration_id;
  691. ir_hpet[free].bus = bus;
  692. ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
  693. pr_info("HPET id %d under DRHD base 0x%Lx\n",
  694. scope->enumeration_id, drhd->address);
  695. return 0;
  696. }
  697. static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
  698. struct intel_iommu *iommu,
  699. struct acpi_dmar_hardware_unit *drhd)
  700. {
  701. struct acpi_dmar_pci_path *path;
  702. u8 bus;
  703. int count, free = -1;
  704. bus = scope->bus;
  705. path = (struct acpi_dmar_pci_path *)(scope + 1);
  706. count = (scope->length - sizeof(struct acpi_dmar_device_scope))
  707. / sizeof(struct acpi_dmar_pci_path);
  708. while (--count > 0) {
  709. /*
710. * Access PCI directly because the PCI
  711. * subsystem isn't initialized yet.
  712. */
  713. bus = read_pci_config_byte(bus, path->device, path->function,
  714. PCI_SECONDARY_BUS);
  715. path++;
  716. }
  717. for (count = 0; count < MAX_IO_APICS; count++) {
  718. if (ir_ioapic[count].iommu == iommu &&
  719. ir_ioapic[count].id == scope->enumeration_id)
  720. return 0;
  721. else if (ir_ioapic[count].iommu == NULL && free == -1)
  722. free = count;
  723. }
  724. if (free == -1) {
  725. pr_warn("Exceeded Max IO APICS\n");
  726. return -ENOSPC;
  727. }
  728. ir_ioapic[free].bus = bus;
  729. ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
  730. ir_ioapic[free].iommu = iommu;
  731. ir_ioapic[free].id = scope->enumeration_id;
  732. pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
  733. scope->enumeration_id, drhd->address, iommu->seq_id);
  734. return 0;
  735. }
  736. static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
  737. struct intel_iommu *iommu)
  738. {
  739. int ret = 0;
  740. struct acpi_dmar_hardware_unit *drhd;
  741. struct acpi_dmar_device_scope *scope;
  742. void *start, *end;
  743. drhd = (struct acpi_dmar_hardware_unit *)header;
  744. start = (void *)(drhd + 1);
  745. end = ((void *)drhd) + header->length;
  746. while (start < end && ret == 0) {
  747. scope = start;
  748. if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
  749. ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
  750. else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
  751. ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
  752. start += scope->length;
  753. }
  754. return ret;
  755. }
  756. static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
  757. {
  758. int i;
  759. for (i = 0; i < MAX_HPET_TBS; i++)
  760. if (ir_hpet[i].iommu == iommu)
  761. ir_hpet[i].iommu = NULL;
  762. for (i = 0; i < MAX_IO_APICS; i++)
  763. if (ir_ioapic[i].iommu == iommu)
  764. ir_ioapic[i].iommu = NULL;
  765. }
  766. /*
767. * Finds the association between each IOAPIC and its interrupt-remapping
  768. * hardware unit.
  769. */
  770. static int __init parse_ioapics_under_ir(void)
  771. {
  772. struct dmar_drhd_unit *drhd;
  773. struct intel_iommu *iommu;
  774. bool ir_supported = false;
  775. int ioapic_idx;
  776. for_each_iommu(iommu, drhd) {
  777. int ret;
  778. if (!ecap_ir_support(iommu->ecap))
  779. continue;
  780. ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
  781. if (ret)
  782. return ret;
  783. ir_supported = true;
  784. }
  785. if (!ir_supported)
  786. return -ENODEV;
  787. for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
  788. int ioapic_id = mpc_ioapic_id(ioapic_idx);
  789. if (!map_ioapic_to_ir(ioapic_id)) {
  790. pr_err(FW_BUG "ioapic %d has no mapping iommu, "
  791. "interrupt remapping will be disabled\n",
  792. ioapic_id);
  793. return -1;
  794. }
  795. }
  796. return 0;
  797. }
  798. static int __init ir_dev_scope_init(void)
  799. {
  800. int ret;
  801. if (!irq_remapping_enabled)
  802. return 0;
  803. down_write(&dmar_global_lock);
  804. ret = dmar_dev_scope_init();
  805. up_write(&dmar_global_lock);
  806. return ret;
  807. }
  808. rootfs_initcall(ir_dev_scope_init);
  809. static void disable_irq_remapping(void)
  810. {
  811. struct dmar_drhd_unit *drhd;
  812. struct intel_iommu *iommu = NULL;
  813. /*
  814. * Disable Interrupt-remapping for all the DRHD's now.
  815. */
  816. for_each_iommu(iommu, drhd) {
  817. if (!ecap_ir_support(iommu->ecap))
  818. continue;
  819. iommu_disable_irq_remapping(iommu);
  820. }
  821. /*
  822. * Clear Posted-Interrupts capability.
  823. */
  824. if (!disable_irq_post)
  825. intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
  826. }
  827. static int reenable_irq_remapping(int eim)
  828. {
  829. struct dmar_drhd_unit *drhd;
  830. bool setup = false;
  831. struct intel_iommu *iommu = NULL;
  832. for_each_iommu(iommu, drhd)
  833. if (iommu->qi)
  834. dmar_reenable_qi(iommu);
  835. /*
  836. * Setup Interrupt-remapping for all the DRHD's now.
  837. */
  838. for_each_iommu(iommu, drhd) {
  839. if (!ecap_ir_support(iommu->ecap))
  840. continue;
841. /* Set up interrupt remapping for iommu. */
  842. iommu_set_irq_remapping(iommu, eim);
  843. iommu_enable_irq_remapping(iommu);
  844. setup = true;
  845. }
  846. if (!setup)
  847. goto error;
  848. set_irq_posting_cap();
  849. return 0;
  850. error:
  851. /*
  852. * handle error condition gracefully here!
  853. */
  854. return -1;
  855. }
  856. static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
  857. {
  858. memset(irte, 0, sizeof(*irte));
  859. irte->present = 1;
  860. irte->dst_mode = apic->irq_dest_mode;
  861. /*
  862. * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
863. * actual level or edge trigger will be set up in the IO-APIC
864. * RTE. This helps simplify level-triggered irq migration.
865. * For more details, see the comments (in io_apic.c) explaining IO-APIC
  866. * irq migration in the presence of interrupt-remapping.
  867. */
  868. irte->trigger_mode = 0;
  869. irte->dlvry_mode = apic->irq_delivery_mode;
  870. irte->vector = vector;
  871. irte->dest_id = IRTE_DEST(dest);
  872. irte->redir_hint = 1;
  873. }
  874. static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info)
  875. {
  876. struct intel_iommu *iommu = NULL;
  877. if (!info)
  878. return NULL;
  879. switch (info->type) {
  880. case X86_IRQ_ALLOC_TYPE_IOAPIC:
  881. iommu = map_ioapic_to_ir(info->ioapic_id);
  882. break;
  883. case X86_IRQ_ALLOC_TYPE_HPET:
  884. iommu = map_hpet_to_ir(info->hpet_id);
  885. break;
  886. case X86_IRQ_ALLOC_TYPE_MSI:
  887. case X86_IRQ_ALLOC_TYPE_MSIX:
  888. iommu = map_dev_to_ir(info->msi_dev);
  889. break;
  890. default:
  891. BUG_ON(1);
  892. break;
  893. }
  894. return iommu ? iommu->ir_domain : NULL;
  895. }
  896. static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
  897. {
  898. struct intel_iommu *iommu;
  899. if (!info)
  900. return NULL;
  901. switch (info->type) {
  902. case X86_IRQ_ALLOC_TYPE_MSI:
  903. case X86_IRQ_ALLOC_TYPE_MSIX:
  904. iommu = map_dev_to_ir(info->msi_dev);
  905. if (iommu)
  906. return iommu->ir_msi_domain;
  907. break;
  908. default:
  909. break;
  910. }
  911. return NULL;
  912. }
  913. struct irq_remap_ops intel_irq_remap_ops = {
  914. .prepare = intel_prepare_irq_remapping,
  915. .enable = intel_enable_irq_remapping,
  916. .disable = disable_irq_remapping,
  917. .reenable = reenable_irq_remapping,
  918. .enable_faulting = enable_drhd_fault_handling,
  919. .get_ir_irq_domain = intel_get_ir_irq_domain,
  920. .get_irq_domain = intel_get_irq_domain,
  921. };
  922. static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
  923. {
  924. struct intel_ir_data *ir_data = irqd->chip_data;
  925. struct irte *irte = &ir_data->irte_entry;
  926. struct irq_cfg *cfg = irqd_cfg(irqd);
  927. /*
928. * Atomically update the IRTE with the new destination and vector,
929. * then flush the interrupt entry cache.
  930. */
  931. irte->vector = cfg->vector;
  932. irte->dest_id = IRTE_DEST(cfg->dest_apicid);
  933. /* Update the hardware only if the interrupt is in remapped mode. */
  934. if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
  935. modify_irte(&ir_data->irq_2_iommu, irte);
  936. }
  937. /*
  938. * Migrate the IO-APIC irq in the presence of intr-remapping.
  939. *
  940. * For both level and edge triggered, irq migration is a simple atomic
941. * update (of vector and cpu destination) of the IRTE and a flush of the hardware cache.
  942. *
943. * For level triggered, we eliminate the io-apic RTE modification (with the
944. * updated vector information) by using a virtual vector (the io-apic pin number).
945. * The real vector used to interrupt the cpu comes from
  946. * the interrupt-remapping table entry.
  947. *
  948. * As the migration is a simple atomic update of IRTE, the same mechanism
  949. * is used to migrate MSI irq's in the presence of interrupt-remapping.
  950. */
  951. static int
  952. intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
  953. bool force)
  954. {
  955. struct irq_data *parent = data->parent_data;
  956. struct irq_cfg *cfg = irqd_cfg(data);
  957. int ret;
  958. ret = parent->chip->irq_set_affinity(parent, mask, force);
  959. if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
  960. return ret;
  961. intel_ir_reconfigure_irte(data, false);
  962. /*
  963. * After this point, all the interrupts will start arriving
964. * at the new destination. So, time to clean up the previous
  965. * vector allocation.
  966. */
  967. send_cleanup_vector(cfg);
  968. return IRQ_SET_MASK_OK_DONE;
  969. }
  970. static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
  971. struct msi_msg *msg)
  972. {
  973. struct intel_ir_data *ir_data = irq_data->chip_data;
  974. *msg = ir_data->msi_entry;
  975. }
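/*
 * Switch an IRTE between remapped and posted mode for VT-d posted
 * interrupts. A NULL vcpu_data reverts to the cached remapped entry;
 * otherwise the entry is rewritten in posted format, pointing at the vCPU's
 * posted-interrupt descriptor.
 */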
  976. static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
  977. {
  978. struct intel_ir_data *ir_data = data->chip_data;
  979. struct vcpu_data *vcpu_pi_info = info;
  980. /* stop posting interrupts, back to remapping mode */
  981. if (!vcpu_pi_info) {
  982. modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
  983. } else {
  984. struct irte irte_pi;
  985. /*
  986. * We are not caching the posted interrupt entry. We
  987. * copy the data from the remapped entry and modify
  988. * the fields which are relevant for posted mode. The
  989. * cached remapped entry is used for switching back to
  990. * remapped mode.
  991. */
  992. memset(&irte_pi, 0, sizeof(irte_pi));
  993. dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);
  994. /* Update the posted mode fields */
  995. irte_pi.p_pst = 1;
  996. irte_pi.p_urgent = 0;
  997. irte_pi.p_vector = vcpu_pi_info->vector;
  998. irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
  999. (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
  1000. irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
  1001. ~(-1UL << PDA_HIGH_BIT);
  1002. modify_irte(&ir_data->irq_2_iommu, &irte_pi);
  1003. }
  1004. return 0;
  1005. }
  1006. static struct irq_chip intel_ir_chip = {
  1007. .name = "INTEL-IR",
  1008. .irq_ack = apic_ack_irq,
  1009. .irq_set_affinity = intel_ir_set_affinity,
  1010. .irq_compose_msi_msg = intel_ir_compose_msi_msg,
  1011. .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
  1012. };
  1013. static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
  1014. struct irq_cfg *irq_cfg,
  1015. struct irq_alloc_info *info,
  1016. int index, int sub_handle)
  1017. {
  1018. struct IR_IO_APIC_route_entry *entry;
  1019. struct irte *irte = &data->irte_entry;
  1020. struct msi_msg *msg = &data->msi_entry;
  1021. prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);
  1022. switch (info->type) {
  1023. case X86_IRQ_ALLOC_TYPE_IOAPIC:
  1024. /* Set source-id of interrupt request */
  1025. set_ioapic_sid(irte, info->ioapic_id);
  1026. apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
  1027. info->ioapic_id, irte->present, irte->fpd,
  1028. irte->dst_mode, irte->redir_hint,
  1029. irte->trigger_mode, irte->dlvry_mode,
  1030. irte->avail, irte->vector, irte->dest_id,
  1031. irte->sid, irte->sq, irte->svt);
  1032. entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry;
  1033. info->ioapic_entry = NULL;
  1034. memset(entry, 0, sizeof(*entry));
  1035. entry->index2 = (index >> 15) & 0x1;
  1036. entry->zero = 0;
  1037. entry->format = 1;
  1038. entry->index = (index & 0x7fff);
  1039. /*
1040. * The IO-APIC RTE will be configured with a virtual vector.
1041. * The irq handler will do the explicit EOI to the io-apic.
  1042. */
  1043. entry->vector = info->ioapic_pin;
  1044. entry->mask = 0; /* enable IRQ */
  1045. entry->trigger = info->ioapic_trigger;
  1046. entry->polarity = info->ioapic_polarity;
  1047. if (info->ioapic_trigger)
  1048. entry->mask = 1; /* Mask level triggered irqs. */
  1049. break;
  1050. case X86_IRQ_ALLOC_TYPE_HPET:
  1051. case X86_IRQ_ALLOC_TYPE_MSI:
  1052. case X86_IRQ_ALLOC_TYPE_MSIX:
  1053. if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
  1054. set_hpet_sid(irte, info->hpet_id);
  1055. else
  1056. set_msi_sid(irte, info->msi_dev);
  1057. msg->address_hi = MSI_ADDR_BASE_HI;
  1058. msg->data = sub_handle;
  1059. msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
  1060. MSI_ADDR_IR_SHV |
  1061. MSI_ADDR_IR_INDEX1(index) |
  1062. MSI_ADDR_IR_INDEX2(index);
  1063. break;
  1064. default:
  1065. BUG_ON(1);
  1066. break;
  1067. }
  1068. }
  1069. static void intel_free_irq_resources(struct irq_domain *domain,
  1070. unsigned int virq, unsigned int nr_irqs)
  1071. {
  1072. struct irq_data *irq_data;
  1073. struct intel_ir_data *data;
  1074. struct irq_2_iommu *irq_iommu;
  1075. unsigned long flags;
  1076. int i;
  1077. for (i = 0; i < nr_irqs; i++) {
  1078. irq_data = irq_domain_get_irq_data(domain, virq + i);
  1079. if (irq_data && irq_data->chip_data) {
  1080. data = irq_data->chip_data;
  1081. irq_iommu = &data->irq_2_iommu;
  1082. raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
  1083. clear_entries(irq_iommu);
  1084. raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
  1085. irq_domain_reset_irq_data(irq_data);
  1086. kfree(data);
  1087. }
  1088. }
  1089. }
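/*
 * irq_domain .alloc callback: allocate the parent (vector) resources,
 * reserve a contiguous block of IRTEs, and attach chip data carrying the
 * IRTE index and per-irq sub_handle to each descriptor.
 */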
  1090. static int intel_irq_remapping_alloc(struct irq_domain *domain,
  1091. unsigned int virq, unsigned int nr_irqs,
  1092. void *arg)
  1093. {
  1094. struct intel_iommu *iommu = domain->host_data;
  1095. struct irq_alloc_info *info = arg;
  1096. struct intel_ir_data *data, *ird;
  1097. struct irq_data *irq_data;
  1098. struct irq_cfg *irq_cfg;
  1099. int i, ret, index;
  1100. if (!info || !iommu)
  1101. return -EINVAL;
  1102. if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
  1103. info->type != X86_IRQ_ALLOC_TYPE_MSIX)
  1104. return -EINVAL;
  1105. /*
1106. * With IRQ remapping enabled, we don't need contiguous CPU vectors
  1107. * to support multiple MSI interrupts.
  1108. */
  1109. if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
  1110. info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
  1111. ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
  1112. if (ret < 0)
  1113. return ret;
  1114. ret = -ENOMEM;
  1115. data = kzalloc(sizeof(*data), GFP_KERNEL);
  1116. if (!data)
  1117. goto out_free_parent;
  1118. down_read(&dmar_global_lock);
  1119. index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
  1120. up_read(&dmar_global_lock);
  1121. if (index < 0) {
  1122. pr_warn("Failed to allocate IRTE\n");
  1123. kfree(data);
  1124. goto out_free_parent;
  1125. }
  1126. for (i = 0; i < nr_irqs; i++) {
  1127. irq_data = irq_domain_get_irq_data(domain, virq + i);
  1128. irq_cfg = irqd_cfg(irq_data);
  1129. if (!irq_data || !irq_cfg) {
  1130. ret = -EINVAL;
  1131. goto out_free_data;
  1132. }
  1133. if (i > 0) {
  1134. ird = kzalloc(sizeof(*ird), GFP_KERNEL);
  1135. if (!ird)
  1136. goto out_free_data;
  1137. /* Initialize the common data */
  1138. ird->irq_2_iommu = data->irq_2_iommu;
  1139. ird->irq_2_iommu.sub_handle = i;
  1140. } else {
  1141. ird = data;
  1142. }
  1143. irq_data->hwirq = (index << 16) + i;
  1144. irq_data->chip_data = ird;
  1145. irq_data->chip = &intel_ir_chip;
  1146. intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
  1147. irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
  1148. }
  1149. return 0;
  1150. out_free_data:
  1151. intel_free_irq_resources(domain, virq, i);
  1152. out_free_parent:
  1153. irq_domain_free_irqs_common(domain, virq, nr_irqs);
  1154. return ret;
  1155. }
  1156. static void intel_irq_remapping_free(struct irq_domain *domain,
  1157. unsigned int virq, unsigned int nr_irqs)
  1158. {
  1159. intel_free_irq_resources(domain, virq, nr_irqs);
  1160. irq_domain_free_irqs_common(domain, virq, nr_irqs);
  1161. }
  1162. static int intel_irq_remapping_activate(struct irq_domain *domain,
  1163. struct irq_data *irq_data, bool reserve)
  1164. {
  1165. intel_ir_reconfigure_irte(irq_data, true);
  1166. return 0;
  1167. }
  1168. static void intel_irq_remapping_deactivate(struct irq_domain *domain,
  1169. struct irq_data *irq_data)
  1170. {
  1171. struct intel_ir_data *data = irq_data->chip_data;
  1172. struct irte entry;
  1173. memset(&entry, 0, sizeof(entry));
  1174. modify_irte(&data->irq_2_iommu, &entry);
  1175. }
  1176. static const struct irq_domain_ops intel_ir_domain_ops = {
  1177. .alloc = intel_irq_remapping_alloc,
  1178. .free = intel_irq_remapping_free,
  1179. .activate = intel_irq_remapping_activate,
  1180. .deactivate = intel_irq_remapping_deactivate,
  1181. };
  1182. /*
1183. * Support for Interrupt Remapping Unit hotplug
  1184. */
  1185. static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
  1186. {
  1187. int ret;
  1188. int eim = x2apic_enabled();
  1189. if (eim && !ecap_eim_support(iommu->ecap)) {
  1190. pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
  1191. iommu->reg_phys, iommu->ecap);
  1192. return -ENODEV;
  1193. }
  1194. if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
  1195. pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
  1196. iommu->reg_phys);
  1197. return -ENODEV;
  1198. }
  1199. /* TODO: check all IOAPICs are covered by IOMMU */
  1200. /* Setup Interrupt-remapping now. */
  1201. ret = intel_setup_irq_remapping(iommu);
  1202. if (ret) {
  1203. pr_err("Failed to setup irq remapping for %s\n",
  1204. iommu->name);
  1205. intel_teardown_irq_remapping(iommu);
  1206. ir_remove_ioapic_hpet_scope(iommu);
  1207. } else {
  1208. iommu_enable_irq_remapping(iommu);
  1209. }
  1210. return ret;
  1211. }
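/*
 * DMAR unit hotplug entry point: on insert, parse the unit's IOAPIC/HPET
 * scope and bring up interrupt remapping on it; on removal, refuse while any
 * of the unit's IRTEs are still in use.
 */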
  1212. int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
  1213. {
  1214. int ret = 0;
  1215. struct intel_iommu *iommu = dmaru->iommu;
  1216. if (!irq_remapping_enabled)
  1217. return 0;
  1218. if (iommu == NULL)
  1219. return -EINVAL;
  1220. if (!ecap_ir_support(iommu->ecap))
  1221. return 0;
  1222. if (irq_remapping_cap(IRQ_POSTING_CAP) &&
  1223. !cap_pi_support(iommu->cap))
  1224. return -EBUSY;
  1225. if (insert) {
  1226. if (!iommu->ir_table)
  1227. ret = dmar_ir_add(dmaru, iommu);
  1228. } else {
  1229. if (iommu->ir_table) {
  1230. if (!bitmap_empty(iommu->ir_table->bitmap,
  1231. INTR_REMAP_TABLE_ENTRIES)) {
  1232. ret = -EBUSY;
  1233. } else {
  1234. iommu_disable_irq_remapping(iommu);
  1235. intel_teardown_irq_remapping(iommu);
  1236. ir_remove_ioapic_hpet_scope(iommu);
  1237. }
  1238. }
  1239. }
  1240. return ret;
  1241. }