@@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable.  This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ *    a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *    as we will try to flush the memory through a different alias to that
+ *    actually being used (and the flushes are redundant.)
+ */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 unsigned long attrs)
 {
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	unsigned long pfn = dma_to_pfn(dev, handle);
+	struct page *page;
 	int ret;
 
+	/* If the PFN is not valid, we do not have a struct page */
+	if (!pfn_valid(pfn))
+		return -ENXIO;
+
+	page = pfn_to_page(pfn);
+
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (unlikely(ret))
 		return ret;
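
For context (not part of the patch itself), a minimal sketch of how a caller might cope with the new failure mode. example_export_coherent() is a hypothetical helper, not an existing kernel function; it only uses the real dma_alloc_coherent(), dma_get_sgtable() and dma_free_coherent() interfaces. With the change above, dma_get_sgtable() on ARM can now return -ENXIO for coherent memory that has no struct page backing, so exporters must check the return value instead of assuming success:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical dma_buf-style exporter helper: allocate a coherent
 * buffer and describe it with an sg_table via dma_get_sgtable().
 * On ARM this can now fail with -ENXIO when the allocation is not
 * backed by struct pages, so the error must be propagated and the
 * allocation released.
 */
static int example_export_coherent(struct device *dev, size_t size,
				   struct sg_table *sgt)
{
	dma_addr_t handle;
	void *cpu_addr;
	int ret;

	cpu_addr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	ret = dma_get_sgtable(dev, sgt, cpu_addr, handle, size);
	if (ret) {
		/* e.g. -ENXIO: no struct page behind this allocation */
		dma_free_coherent(dev, size, cpu_addr, handle);
		return ret;
	}

	return 0;
}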