@@ -53,7 +53,7 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-int swiotlb_force;
+enum swiotlb_force swiotlb_force;
 
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
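(For reference: the enum that replaces the plain int is declared outside this file and does not appear in these hunks. Judging from the constant names used below, its declaration, presumably in include/linux/swiotlb.h, looks roughly like the sketch here; SWIOTLB_NORMAL is assumed to be the zero value so a zero-initialized swiotlb_force keeps its old "don't force bouncing" meaning.)

enum swiotlb_force {
	SWIOTLB_NORMAL,		/* default: bounce only when the device's DMA mask requires it */
	SWIOTLB_FORCE,		/* set when "swiotlb=force" is given on the kernel command line */
};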
@@ -107,7 +107,7 @@ setup_io_tlb_npages(char *str)
 	if (*str == ',')
 		++str;
 	if (!strcmp(str, "force"))
-		swiotlb_force = 1;
+		swiotlb_force = SWIOTLB_FORCE;
 
 	return 0;
 }
@@ -763,7 +763,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
+	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
 		return dev_addr;
 
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
@@ -904,7 +904,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		phys_addr_t paddr = sg_phys(sg);
 		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
 
-		if (swiotlb_force ||
+		if (swiotlb_force == SWIOTLB_FORCE ||
 		    !dma_capable(hwdev, dev_addr, sg->length)) {
 			phys_addr_t map = map_single(hwdev, sg_phys(sg),
 						     sg->length, dir, attrs);
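(Taken together, the two mapping-path hunks change the bounce decision from a truthiness test on an int to an explicit comparison against SWIOTLB_FORCE, preserving the existing behaviour while making the policy explicit. A minimal standalone sketch of that decision follows; needs_bounce() is a hypothetical helper and the bool parameter stands in for the dma_capable() check, so this is an illustration of the logic, not code from the patch.)

#include <stdbool.h>
#include <stdio.h>

enum swiotlb_force { SWIOTLB_NORMAL, SWIOTLB_FORCE };

/* Hypothetical helper (not from the patch): bounce through the swiotlb
 * when forcing is enabled, or when the device cannot reach the buffer
 * directly (stand-in for the dma_capable() check). */
static bool needs_bounce(enum swiotlb_force force, bool device_can_reach_buffer)
{
	return force == SWIOTLB_FORCE || !device_can_reach_buffer;
}

int main(void)
{
	printf("%d\n", needs_bounce(SWIOTLB_NORMAL, true));	/* 0: use the direct mapping */
	printf("%d\n", needs_bounce(SWIOTLB_FORCE, true));	/* 1: bounce even though the device could reach it */
	printf("%d\n", needs_bounce(SWIOTLB_NORMAL, false));	/* 1: device cannot address the buffer */
	return 0;
}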