/* arch/x86/kernel/pci-dma.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/dma-direct.h>
  3. #include <linux/dma-debug.h>
  4. #include <linux/dmar.h>
  5. #include <linux/export.h>
  6. #include <linux/bootmem.h>
  7. #include <linux/gfp.h>
  8. #include <linux/pci.h>
  9. #include <asm/proto.h>
  10. #include <asm/dma.h>
  11. #include <asm/iommu.h>
  12. #include <asm/gart.h>
  13. #include <asm/calgary.h>
  14. #include <asm/x86_init.h>
  15. #include <asm/iommu_table.h>
  16. static bool disable_dac_quirk __read_mostly;
  17. const struct dma_map_ops *dma_ops = &dma_direct_ops;
  18. EXPORT_SYMBOL(dma_ops);
  19. #ifdef CONFIG_IOMMU_DEBUG
  20. int panic_on_overflow __read_mostly = 1;
  21. int force_iommu __read_mostly = 1;
  22. #else
  23. int panic_on_overflow __read_mostly = 0;
  24. int force_iommu __read_mostly = 0;
  25. #endif
  26. int iommu_merge __read_mostly = 0;
  27. int no_iommu __read_mostly;
  28. /* Set this to 1 if there is a HW IOMMU in the system */
  29. int iommu_detected __read_mostly = 0;
  30. /*
  31. * This variable becomes 1 if iommu=pt is passed on the kernel command line.
  32. * If this variable is 1, IOMMU implementations do no DMA translation for
  33. * devices and allow every device to access to whole physical memory. This is
  34. * useful if a user wants to use an IOMMU only for KVM device assignment to
  35. * guests and not for driver dma translation.
  36. */
  37. int iommu_pass_through __read_mostly;
  38. extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
  39. /* Dummy device used for NULL arguments (normally ISA). */
  40. struct device x86_dma_fallback_dev = {
  41. .init_name = "fallback device",
  42. .coherent_dma_mask = ISA_DMA_BIT_MASK,
  43. .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
  44. };
  45. EXPORT_SYMBOL(x86_dma_fallback_dev);
  46. void __init pci_iommu_alloc(void)
  47. {
  48. struct iommu_table_entry *p;
  49. sort_iommu_table(__iommu_table, __iommu_table_end);
  50. check_iommu_entries(__iommu_table, __iommu_table_end);
  51. for (p = __iommu_table; p < __iommu_table_end; p++) {
  52. if (p && p->detect && p->detect() > 0) {
  53. p->flags |= IOMMU_DETECTED;
  54. if (p->early_init)
  55. p->early_init();
  56. if (p->flags & IOMMU_FINISH_IF_DETECTED)
  57. break;
  58. }
  59. }
  60. }
  61. bool arch_dma_alloc_attrs(struct device **dev)
  62. {
  63. if (!*dev)
  64. *dev = &x86_dma_fallback_dev;
  65. if (!is_device_dma_capable(*dev))
  66. return false;
  67. return true;
  68. }
  69. EXPORT_SYMBOL(arch_dma_alloc_attrs);
  70. /*
  71. * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
  72. * parameter documentation.
  73. */
  74. static __init int iommu_setup(char *p)
  75. {
  76. iommu_merge = 1;
  77. if (!p)
  78. return -EINVAL;
  79. while (*p) {
  80. if (!strncmp(p, "off", 3))
  81. no_iommu = 1;
  82. /* gart_parse_options has more force support */
  83. if (!strncmp(p, "force", 5))
  84. force_iommu = 1;
  85. if (!strncmp(p, "noforce", 7)) {
  86. iommu_merge = 0;
  87. force_iommu = 0;
  88. }
  89. if (!strncmp(p, "biomerge", 8)) {
  90. iommu_merge = 1;
  91. force_iommu = 1;
  92. }
  93. if (!strncmp(p, "panic", 5))
  94. panic_on_overflow = 1;
  95. if (!strncmp(p, "nopanic", 7))
  96. panic_on_overflow = 0;
  97. if (!strncmp(p, "merge", 5)) {
  98. iommu_merge = 1;
  99. force_iommu = 1;
  100. }
  101. if (!strncmp(p, "nomerge", 7))
  102. iommu_merge = 0;
  103. if (!strncmp(p, "forcesac", 8))
  104. pr_warn("forcesac option ignored.\n");
  105. if (!strncmp(p, "allowdac", 8))
  106. pr_warn("allowdac option ignored.\n");
  107. if (!strncmp(p, "nodac", 5))
  108. pr_warn("nodac option ignored.\n");
  109. if (!strncmp(p, "usedac", 6)) {
  110. disable_dac_quirk = true;
  111. return 1;
  112. }
  113. #ifdef CONFIG_SWIOTLB
  114. if (!strncmp(p, "soft", 4))
  115. swiotlb = 1;
  116. #endif
  117. if (!strncmp(p, "pt", 2))
  118. iommu_pass_through = 1;
  119. gart_parse_options(p);
  120. #ifdef CONFIG_CALGARY_IOMMU
  121. if (!strncmp(p, "calgary", 7))
  122. use_calgary = 1;
  123. #endif /* CONFIG_CALGARY_IOMMU */
  124. p += strcspn(p, ",");
  125. if (*p == ',')
  126. ++p;
  127. }
  128. return 0;
  129. }
  130. early_param("iommu", iommu_setup);
  131. static int __init pci_iommu_init(void)
  132. {
  133. struct iommu_table_entry *p;
  134. #ifdef CONFIG_PCI
  135. dma_debug_add_bus(&pci_bus_type);
  136. #endif
  137. x86_init.iommu.iommu_init();
  138. for (p = __iommu_table; p < __iommu_table_end; p++) {
  139. if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
  140. p->late_init();
  141. }
  142. return 0;
  143. }
  144. /* Must execute after PCI subsystem */
  145. rootfs_initcall(pci_iommu_init);
  146. #ifdef CONFIG_PCI
  147. /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
  148. static int via_no_dac_cb(struct pci_dev *pdev, void *data)
  149. {
  150. pdev->dev.dma_32bit_limit = true;
  151. return 0;
  152. }
  153. static void via_no_dac(struct pci_dev *dev)
  154. {
  155. if (!disable_dac_quirk) {
  156. dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
  157. pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
  158. }
  159. }
  160. DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
  161. PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
  162. #endif