intel_irq_remapping.c

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>

#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"
struct ioapic_scope {
        struct intel_iommu *iommu;
        unsigned int id;
        unsigned int bus;       /* PCI bus number */
        unsigned int devfn;     /* PCI devfn number */
};

struct hpet_scope {
        struct intel_iommu *iommu;
        u8 id;
        unsigned int bus;
        unsigned int devfn;
};

#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
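/*
 * In xAPIC mode the 8-bit APIC ID is programmed into bits 15:8 of the
 * IRTE destination field, while x2APIC (EIM) mode uses the full 32-bit
 * ID directly; IRTE_DEST() hides that difference from callers.
 */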
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

/*
 * Lock ordering:
 * ->dmar_global_lock
 *      ->irq_2_ir_lock
 *              ->qi->q_lock
 *      ->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static int __init parse_ioapics_under_ir(void);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_cfg *cfg = irq_get_chip_data(irq);
        return cfg ? &cfg->irq_2_iommu : NULL;
}

static int get_irte(int irq, struct irte *entry)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!entry || !irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        if (unlikely(!irq_iommu->iommu)) {
                raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}
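/*
 * Multi-MSI allocations must be naturally aligned powers of two, so a
 * request for e.g. 5 IRTEs is rounded up to 8 and recorded as mask = 3
 * (ilog2(8)); the same mask later determines how many entries an IEC
 * flush or a free covers.
 */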
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct irq_cfg *cfg = irq_get_chip_data(irq);
        unsigned int mask = 0;
        unsigned long flags;
        int index;

        if (!count || !irq_iommu)
                return -1;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
        index = bitmap_find_free_region(table->bitmap,
                                        INTR_REMAP_TABLE_ENTRIES, mask);
        if (index < 0) {
                pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
        } else {
                cfg->remapped = 1;
                irq_iommu->iommu = iommu;
                irq_iommu->irte_index = index;
                irq_iommu->sub_handle = 0;
                irq_iommu->irte_mask = mask;
        }
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}
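/*
 * Queue an Interrupt Entry Cache invalidation so the hardware drops any
 * cached copies of the IRTEs at @index; @mask widens the flush to a
 * 2^mask aligned block of entries.
 */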
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct irq_cfg *cfg = irq_get_chip_data(irq);
        unsigned long flags;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        cfg->remapped = 1;
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}
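/*
 * Update an IRTE in place and flush the interrupt entry cache so that
 * subsequent interrupts through this entry use the new vector and
 * destination. This is the primitive that makes irq migration atomic.
 */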
static int modify_irte(int irq, struct irte *irte_modified)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct intel_iommu *iommu;
        unsigned long flags;
        struct irte *irte;
        int rc, index;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit(&irte->low, irte_modified->low);
        set_64bit(&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id)
                        return ir_hpet[i].iommu;
        return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit(&entry->low, 0);
                set_64bit(&entry->high, 0);
        }
        bitmap_release_region(iommu->ir_table->bitmap, index,
                              irq_iommu->irte_mask);

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

static int free_irte(int irq)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int rc;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}
/*
 * source validation type
 */
#define SVT_NO_VERIFY           0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ       0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS          0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16       0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1  0x1  /* verify most significant 13 bits, ignore
                              * the third least significant bit
                              */
#define SQ_13_IGNORE_2  0x2  /* verify most significant 13 bits, ignore
                              * the second and third least significant bits
                              */
#define SQ_13_IGNORE_3  0x3  /* verify most significant 13 bits, ignore
                              * the least three significant bits
                              */
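/*
 * Example: with SQ_13_IGNORE_3 and sid = 00:1f.0 (0x00f8), requests from
 * any of 00:1f.0 - 00:1f.7 pass validation, since the low three bits of
 * the request-id (the PCI function number) are ignored.
 */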
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        if (disable_sourceid_checking)
                svt = SVT_NO_VERIFY;
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        down_read(&dmar_global_lock);
        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }
        up_read(&dmar_global_lock);

        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

        return 0;
}
static int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        down_read(&dmar_global_lock);
        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }
        up_read(&dmar_global_lock);

        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16. Some platforms are broken.
         * While we figure out the right quirks for these broken platforms, use
         * SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}
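/*
 * pci_for_each_dma_alias() invokes the callback for the device itself
 * and then for every alias a bridge on the path to the root may
 * generate; the callback below simply records the last (most upstream)
 * device/alias pair reported.
 */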
struct set_msi_sid_data {
        struct pci_dev *pdev;
        u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct set_msi_sid_data *data = opaque;

        data->pdev = pdev;
        data->alias = alias;

        return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct set_msi_sid_data data;

        if (!irte || !dev)
                return -1;

        pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

        /*
         * DMA alias provides us with a PCI device and alias. The only case
         * where the alias will be on a different bus than the device is the
         * case of a PCIe-to-PCI bridge, where the alias is for the
         * subordinate bus. In this case we can only verify the bus.
         *
         * If the alias device is on a different bus than our source device
         * then we have a topology based alias, use it.
         *
         * Otherwise, the alias is for a device DMA quirk and we cannot
         * assume that MSI uses the same requester ID. Therefore use the
         * original device.
         */
        if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
                set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                             PCI_DEVID(PCI_BUS_NUM(data.alias),
                                       dev->bus->number));
        else if (data.pdev->bus->number != dev->bus->number)
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
        else
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             PCI_DEVID(dev->bus->number, dev->devfn));

        return 0;
}
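/*
 * Program the IRTA register with the table address and size, latch it
 * with a SIRTP command, flush the interrupt entry cache globally, and
 * only then set IRE to turn remapping on, with CFI (compatibility-format
 * interrupts) blocked.
 */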
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * Global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        /*
         * With CFI clear in the Global Command register, we should be
         * protected from dangerous (i.e. compatibility) interrupts
         * regardless of x2apic status. Check just to be sure.
         */
        if (sts & DMA_GSTS_CFIS)
                WARN(1, KERN_WARNING
                        "Compatibility-format IRQs enabled despite intr remapping;\n"
                        "you are vulnerable to IRQ injection.\n");

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;
        unsigned long *bitmap;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);
        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);
        if (!pages) {
                pr_err("IR%d: failed to allocate pages of order %d\n",
                       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
                         sizeof(long), GFP_ATOMIC);
        if (bitmap == NULL) {
                pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
                __free_pages(pages, INTR_REMAP_PAGE_ORDER);
                kfree(ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);
        ir_table->bitmap = bitmap;

        iommu_set_irq_remapping(iommu, mode);
        return 0;
}
/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* DMAR_GSTS_REG is only 32 bits wide; use readl, not a 64-bit read */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int __init dmar_x2apic_optout(void)
{
        struct acpi_table_dmar *dmar;
        dmar = (struct acpi_table_dmar *)dmar_tbl;

        if (!dmar || no_x2apic_optout)
                return 0;
        return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static int __init intel_irq_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        if (disable_irq_remap)
                return 0;
        if (irq_remap_broken) {
                printk(KERN_WARNING
                        "This system BIOS has enabled interrupt remapping\n"
                        "on a chipset that contains an erratum making that\n"
                        "feature unstable.  To maintain system stability\n"
                        "interrupt remapping is being disabled.  Please\n"
                        "contact your BIOS vendor for an update\n");
                add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
                disable_irq_remap = 1;
                return 0;
        }

        if (!dmar_ir_support())
                return 0;

        for_each_iommu(iommu, drhd)
                if (!ecap_ir_support(iommu->ecap))
                        return 0;

        return 1;
}
static int __init intel_enable_irq_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        bool x2apic_present;
        int setup = 0;
        int eim = 0;

        x2apic_present = x2apic_supported();

        if (parse_ioapics_under_ir() != 1) {
                printk(KERN_INFO "Not enabling interrupt remapping\n");
                goto error;
        }

        if (x2apic_present) {
                pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

                eim = !dmar_x2apic_optout();
                if (!eim)
                        printk(KERN_WARNING
                                "Your BIOS is broken and requested that x2apic be disabled.\n"
                                "This will slightly decrease performance.\n"
                                "Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
        }

        for_each_iommu(iommu, drhd) {
                /*
                 * If queued invalidation is already initialized,
                 * we shouldn't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_irq_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * check for the Interrupt-remapping support
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
                               "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        goto error;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_iommu(iommu, drhd) {
                int ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued "
                               "invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        goto error;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (intel_setup_irq_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        irq_remapping_enabled = 1;

        /*
         * VT-d has a different layout for IO-APIC entries when
         * interrupt remapping is enabled. So it needs a special routine
         * to print IO-APIC entries for debugging purposes too.
         */
        x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

        pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

        return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
        /*
         * handle error condition gracefully here!
         */
        if (x2apic_present)
                pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");

        return -1;
}
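/*
 * A DMAR device-scope entry describes a device by a path of (device,
 * function) pairs starting at a bus; the parse helpers below walk that
 * path through each bridge's PCI_SECONDARY_BUS register to find the bus
 * the endpoint actually sits on.
 */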
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                    struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->device, path->function,
                                           PCI_SECONDARY_BUS);
                path++;
        }
        ir_hpet[ir_hpet_num].bus = bus;
        ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
        ir_hpet[ir_hpet_num].iommu = iommu;
        ir_hpet[ir_hpet_num].id = scope->enumeration_id;
        ir_hpet_num++;
}
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->device, path->function,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_ioapic[ir_ioapic_num].bus = bus;
        ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
        ir_ioapic[ir_ioapic_num].iommu = iommu;
        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
        ir_ioapic_num++;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base "
                               "0x%Lx IOMMU %d\n", scope->enumeration_id,
                               drhd->address, iommu->seq_id);

                        ir_parse_one_ioapic_scope(scope, iommu);
                } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
                        if (ir_hpet_num == MAX_HPET_TBS) {
                                printk(KERN_WARNING "Exceeded Max HPET blocks\n");
                                return -1;
                        }

                        printk(KERN_INFO "HPET id %d under DRHD base "
                               "0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }

        return 0;
}
/*
 * Finds the association between IOAPICs and their interrupt-remapping
 * hardware unit.
 */
static int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int ir_supported = 0;
        int ioapic_idx;

        for_each_iommu(iommu, drhd)
                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }

        if (!ir_supported)
                return 0;

        for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
                int ioapic_id = mpc_ioapic_id(ioapic_idx);
                if (!map_ioapic_to_ir(ioapic_id)) {
                        pr_err(FW_BUG "ioapic %d has no mapping iommu, "
                               "interrupt remapping will be disabled\n",
                               ioapic_id);
                        return -1;
                }
        }

        return 1;
}
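/*
 * Device scopes in the DMAR table reference PCI devices that may not
 * exist until the PCI subsystem is up, so the scope lists are
 * re-evaluated from a rootfs_initcall once enumeration has run.
 */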
static int __init ir_dev_scope_init(void)
{
        int ret;

        if (!irq_remapping_enabled)
                return 0;

        down_write(&dmar_global_lock);
        ret = dmar_dev_scope_init();
        up_write(&dmar_global_lock);

        return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_irq_remapping(iommu);
        }
}
static int reenable_irq_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for iommu.*/
                iommu_set_irq_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}
static void prepare_irte(struct irte *irte, int vector,
                         unsigned int dest)
{
        memset(irte, 0, sizeof(*irte));

        irte->present = 1;
        irte->dst_mode = apic->irq_dest_mode;
        /*
         * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
         * actual level or edge trigger will be setup in the IO-APIC
         * RTE. This will help simplify level triggered irq migration.
         * For more details, see the comments (in io_apic.c) explaining IO-APIC
         * irq migration in the presence of interrupt-remapping.
         */
        irte->trigger_mode = 0;
        irte->dlvry_mode = apic->irq_delivery_mode;
        irte->vector = vector;
        irte->dest_id = IRTE_DEST(dest);
        irte->redir_hint = 1;
}
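/*
 * With remapping enabled, the IO-APIC RTE no longer carries a
 * destination; instead format = 1 marks it as remappable and the 16-bit
 * IRTE handle is split into a 15-bit index field plus the index2 bit.
 */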
static int intel_setup_ioapic_entry(int irq,
                                    struct IO_APIC_route_entry *route_entry,
                                    unsigned int destination, int vector,
                                    struct io_apic_irq_attr *attr)
{
        int ioapic_id = mpc_ioapic_id(attr->ioapic);
        struct intel_iommu *iommu;
        struct IR_IO_APIC_route_entry *entry;
        struct irte irte;
        int index;

        down_read(&dmar_global_lock);
        iommu = map_ioapic_to_ir(ioapic_id);
        if (!iommu) {
                pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
                index = -ENODEV;
        } else {
                index = alloc_irte(iommu, irq, 1);
                if (index < 0) {
                        pr_warn("Failed to allocate IRTE for ioapic %d\n",
                                ioapic_id);
                        index = -ENOMEM;
                }
        }
        up_read(&dmar_global_lock);
        if (index < 0)
                return index;

        prepare_irte(&irte, vector, destination);

        /* Set source-id of interrupt request */
        set_ioapic_sid(&irte, ioapic_id);

        modify_irte(irq, &irte);

        apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
                "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
                "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
                "Avail:%X Vector:%02X Dest:%08X "
                "SID:%04X SQ:%X SVT:%X)\n",
                attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
                irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
                irte.avail, irte.vector, irte.dest_id,
                irte.sid, irte.sq, irte.svt);

        entry = (struct IR_IO_APIC_route_entry *)route_entry;
        memset(entry, 0, sizeof(*entry));

        entry->index2   = (index >> 15) & 0x1;
        entry->zero     = 0;
        entry->format   = 1;
        entry->index    = (index & 0x7fff);

        /*
         * IO-APIC RTE will be configured with virtual vector.
         * irq handler will do the explicit EOI to the io-apic.
         */
        entry->vector   = attr->ioapic_pin;
        entry->mask     = 0;                    /* enable IRQ */
        entry->trigger  = attr->trigger;
        entry->polarity = attr->polarity;

        /* Mask level triggered irqs.
         * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
         */
        if (attr->trigger)
                entry->mask = 1;

        return 0;
}
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE followed by a flush
 * of the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the cpu will come from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
                          bool force)
{
        struct irq_cfg *cfg = data->chip_data;
        unsigned int dest, irq = data->irq;
        struct irte irte;
        int err;

        if (!config_enabled(CONFIG_SMP))
                return -EINVAL;

        if (!cpumask_intersects(mask, cpu_online_mask))
                return -EINVAL;

        if (get_irte(irq, &irte))
                return -EBUSY;

        err = assign_irq_vector(irq, cfg, mask);
        if (err)
                return err;

        err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
        if (err) {
                if (assign_irq_vector(irq, cfg, data->affinity))
                        pr_err("Failed to recover vector for irq %d\n", irq);
                return err;
        }

        irte.vector = cfg->vector;
        irte.dest_id = IRTE_DEST(dest);

        /*
         * Atomically updates the IRTE with the new destination, vector
         * and flushes the interrupt entry cache.
         */
        modify_irte(irq, &irte);

        /*
         * After this point, all the interrupts will start arriving
         * at the new destination. So, time to cleanup the previous
         * vector allocation.
         */
        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        cpumask_copy(data->affinity, mask);

        return 0;
}
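/*
 * In remappable format the MSI address/data pair no longer carries
 * destination and vector; the address encodes the IRTE handle (split
 * across the INDEX1/INDEX2 fields) with SHV set, and the data holds the
 * sub_handle that the hardware adds to the handle to pick the final IRTE.
 */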
static void intel_compose_msi_msg(struct pci_dev *pdev,
                                  unsigned int irq, unsigned int dest,
                                  struct msi_msg *msg, u8 hpet_id)
{
        struct irq_cfg *cfg;
        struct irte irte;
        u16 sub_handle = 0;
        int ir_index;

        cfg = irq_get_chip_data(irq);

        ir_index = map_irq_to_irte_handle(irq, &sub_handle);
        BUG_ON(ir_index == -1);

        prepare_irte(&irte, cfg->vector, dest);

        /* Set source-id of interrupt request */
        if (pdev)
                set_msi_sid(&irte, pdev);
        else
                set_hpet_sid(&irte, hpet_id);

        modify_irte(irq, &irte);

        msg->address_hi = MSI_ADDR_BASE_HI;
        msg->data = sub_handle;
        msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
                          MSI_ADDR_IR_SHV |
                          MSI_ADDR_IR_INDEX1(ir_index) |
                          MSI_ADDR_IR_INDEX2(ir_index);
}
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
        struct intel_iommu *iommu;
        int index;

        down_read(&dmar_global_lock);
        iommu = map_dev_to_ir(dev);
        if (!iommu) {
                printk(KERN_ERR
                       "Unable to map PCI %s to iommu\n", pci_name(dev));
                index = -ENOENT;
        } else {
                index = alloc_irte(iommu, irq, nvec);
                if (index < 0) {
                        printk(KERN_ERR
                               "Unable to allocate %d IRTEs for PCI %s\n",
                               nvec, pci_name(dev));
                        index = -ENOSPC;
                }
        }
        up_read(&dmar_global_lock);

        return index;
}

static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
                               int index, int sub_handle)
{
        struct intel_iommu *iommu;
        int ret = -ENOENT;

        down_read(&dmar_global_lock);
        iommu = map_dev_to_ir(pdev);
        if (iommu) {
                /*
                 * Set up the mapping between the irq and the IRTE base
                 * index plus sub_handle, which together point to the
                 * appropriate interrupt remap table entry.
                 */
                set_irte_irq(irq, iommu, index, sub_handle);
                ret = 0;
        }
        up_read(&dmar_global_lock);

        return ret;
}
static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
{
        int ret = -1;
        struct intel_iommu *iommu;
        int index;

        down_read(&dmar_global_lock);
        iommu = map_hpet_to_ir(id);
        if (iommu) {
                index = alloc_irte(iommu, irq, 1);
                if (index >= 0)
                        ret = 0;
        }
        up_read(&dmar_global_lock);

        return ret;
}
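/*
 * Callbacks hooked into the arch-generic irq_remapping layer (see
 * irq_remapping.h); x86 dispatches through these when VT-d interrupt
 * remapping is the active implementation.
 */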
struct irq_remap_ops intel_irq_remap_ops = {
        .supported              = intel_irq_remapping_supported,
        .prepare                = dmar_table_init,
        .enable                 = intel_enable_irq_remapping,
        .disable                = disable_irq_remapping,
        .reenable               = reenable_irq_remapping,
        .enable_faulting        = enable_drhd_fault_handling,
        .setup_ioapic_entry     = intel_setup_ioapic_entry,
        .set_affinity           = intel_ioapic_set_affinity,
        .free_irq               = free_irte,
        .compose_msi_msg        = intel_compose_msi_msg,
        .msi_alloc_irq          = intel_msi_alloc_irq,
        .msi_setup_irq          = intel_msi_setup_irq,
        .setup_hpet_msi         = intel_setup_hpet_msi,
};