/* arch/x86/kernel/pci-dma.c — x86 PCI DMA setup: swiotlb and IOMMU detection */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/dma-map-ops.h>
  3. #include <linux/dma-direct.h>
  4. #include <linux/iommu.h>
  5. #include <linux/dmar.h>
  6. #include <linux/export.h>
  7. #include <linux/memblock.h>
  8. #include <linux/gfp.h>
  9. #include <linux/pci.h>
  10. #include <linux/amd-iommu.h>
  11. #include <asm/proto.h>
  12. #include <asm/dma.h>
  13. #include <asm/iommu.h>
  14. #include <asm/gart.h>
  15. #include <asm/x86_init.h>
  16. #include <xen/xen.h>
  17. #include <xen/swiotlb-xen.h>
  18. static bool disable_dac_quirk __read_mostly;
  19. const struct dma_map_ops *dma_ops;
  20. EXPORT_SYMBOL(dma_ops);
  21. #ifdef CONFIG_IOMMU_DEBUG
  22. int panic_on_overflow __read_mostly = 1;
  23. int force_iommu __read_mostly = 1;
  24. #else
  25. int panic_on_overflow __read_mostly = 0;
  26. int force_iommu __read_mostly = 0;
  27. #endif
  28. int iommu_merge __read_mostly = 0;
  29. int no_iommu __read_mostly;
  30. /* Set this to 1 if there is a HW IOMMU in the system */
  31. int iommu_detected __read_mostly = 0;
  32. #ifdef CONFIG_SWIOTLB
  33. bool x86_swiotlb_enable;
  34. static unsigned int x86_swiotlb_flags;
  35. static void __init pci_swiotlb_detect(void)
  36. {
  37. /* don't initialize swiotlb if iommu=off (no_iommu=1) */
  38. if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
  39. x86_swiotlb_enable = true;
  40. /*
  41. * Set swiotlb to 1 so that bounce buffers are allocated and used for
  42. * devices that can't support DMA to encrypted memory.
  43. */
  44. if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
  45. x86_swiotlb_enable = true;
  46. /*
  47. * Guest with guest memory encryption currently perform all DMA through
  48. * bounce buffers as the hypervisor can't access arbitrary VM memory
  49. * that is not explicitly shared with it.
  50. */
  51. if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
  52. x86_swiotlb_enable = true;
  53. x86_swiotlb_flags |= SWIOTLB_FORCE;
  54. }
  55. }
  56. #else
  57. static inline void __init pci_swiotlb_detect(void)
  58. {
  59. }
  60. #define x86_swiotlb_flags 0
  61. #endif /* CONFIG_SWIOTLB */
  62. #ifdef CONFIG_SWIOTLB_XEN
  63. static void __init pci_xen_swiotlb_init(void)
  64. {
  65. if (!xen_initial_domain() && !x86_swiotlb_enable)
  66. return;
  67. x86_swiotlb_enable = true;
  68. x86_swiotlb_flags |= SWIOTLB_ANY;
  69. swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
  70. dma_ops = &xen_swiotlb_dma_ops;
  71. if (IS_ENABLED(CONFIG_PCI))
  72. pci_request_acs();
  73. }
  74. int pci_xen_swiotlb_init_late(void)
  75. {
  76. if (dma_ops == &xen_swiotlb_dma_ops)
  77. return 0;
  78. /* we can work with the default swiotlb */
  79. if (!io_tlb_default_mem.nslabs) {
  80. int rc = swiotlb_init_late(swiotlb_size_or_default(),
  81. GFP_KERNEL, xen_swiotlb_fixup);
  82. if (rc < 0)
  83. return rc;
  84. }
  85. /* XXX: this switches the dma ops under live devices! */
  86. dma_ops = &xen_swiotlb_dma_ops;
  87. if (IS_ENABLED(CONFIG_PCI))
  88. pci_request_acs();
  89. return 0;
  90. }
  91. EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
  92. #else
  93. static inline void __init pci_xen_swiotlb_init(void)
  94. {
  95. }
  96. #endif /* CONFIG_SWIOTLB_XEN */
  97. void __init pci_iommu_alloc(void)
  98. {
  99. if (xen_pv_domain()) {
  100. pci_xen_swiotlb_init();
  101. return;
  102. }
  103. pci_swiotlb_detect();
  104. gart_iommu_hole_init();
  105. amd_iommu_detect();
  106. detect_intel_iommu();
  107. swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
  108. }
  109. /*
  110. * See <Documentation/x86/x86_64/boot-options.rst> for the iommu kernel
  111. * parameter documentation.
  112. */
  113. static __init int iommu_setup(char *p)
  114. {
  115. iommu_merge = 1;
  116. if (!p)
  117. return -EINVAL;
  118. while (*p) {
  119. if (!strncmp(p, "off", 3))
  120. no_iommu = 1;
  121. /* gart_parse_options has more force support */
  122. if (!strncmp(p, "force", 5))
  123. force_iommu = 1;
  124. if (!strncmp(p, "noforce", 7)) {
  125. iommu_merge = 0;
  126. force_iommu = 0;
  127. }
  128. if (!strncmp(p, "biomerge", 8)) {
  129. iommu_merge = 1;
  130. force_iommu = 1;
  131. }
  132. if (!strncmp(p, "panic", 5))
  133. panic_on_overflow = 1;
  134. if (!strncmp(p, "nopanic", 7))
  135. panic_on_overflow = 0;
  136. if (!strncmp(p, "merge", 5)) {
  137. iommu_merge = 1;
  138. force_iommu = 1;
  139. }
  140. if (!strncmp(p, "nomerge", 7))
  141. iommu_merge = 0;
  142. if (!strncmp(p, "forcesac", 8))
  143. pr_warn("forcesac option ignored.\n");
  144. if (!strncmp(p, "allowdac", 8))
  145. pr_warn("allowdac option ignored.\n");
  146. if (!strncmp(p, "nodac", 5))
  147. pr_warn("nodac option ignored.\n");
  148. if (!strncmp(p, "usedac", 6)) {
  149. disable_dac_quirk = true;
  150. return 1;
  151. }
  152. #ifdef CONFIG_SWIOTLB
  153. if (!strncmp(p, "soft", 4))
  154. x86_swiotlb_enable = true;
  155. #endif
  156. if (!strncmp(p, "pt", 2))
  157. iommu_set_default_passthrough(true);
  158. if (!strncmp(p, "nopt", 4))
  159. iommu_set_default_translated(true);
  160. gart_parse_options(p);
  161. p += strcspn(p, ",");
  162. if (*p == ',')
  163. ++p;
  164. }
  165. return 0;
  166. }
  167. early_param("iommu", iommu_setup);
  168. static int __init pci_iommu_init(void)
  169. {
  170. x86_init.iommu.iommu_init();
  171. #ifdef CONFIG_SWIOTLB
  172. /* An IOMMU turned us off. */
  173. if (x86_swiotlb_enable) {
  174. pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
  175. swiotlb_print_info();
  176. } else {
  177. swiotlb_exit();
  178. }
  179. #endif
  180. return 0;
  181. }
  182. /* Must execute after PCI subsystem */
  183. rootfs_initcall(pci_iommu_init);
  184. #ifdef CONFIG_PCI
  185. /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
  186. static int via_no_dac_cb(struct pci_dev *pdev, void *data)
  187. {
  188. pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
  189. return 0;
  190. }
  191. static void via_no_dac(struct pci_dev *dev)
  192. {
  193. if (!disable_dac_quirk) {
  194. dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
  195. pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
  196. }
  197. }
  198. DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
  199. PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
  200. #endif