@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
 {
	if (dev && dev->dma_mem)
		return dev->dma_mem;
-	return dma_coherent_default_memory;
+	return NULL;
 }
 
 static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev:	device from which we allocate memory
- * @size:	size of requested memory area
- * @dma_handle:	This will be filled with the correct dma handle
- * @ret:	This pointer will be filled with the virtual address
- *		to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
-				       dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+		ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	int dma_memory_map;
+	void *ret;
 
-	if (!mem)
-		return 0;
-
-	*ret = NULL;
	spin_lock_irqsave(&mem->spinlock, flags);
 
	if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
		goto err;
 
	/*
-	 * Memory was found in the per-device area.
+	 * Memory was found in the coherent area.
	 */
-	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	if (dma_memory_map)
-		memset(*ret, 0, size);
+		memset(ret, 0, size);
	else
-		memset_io(*ret, 0, size);
+		memset_io(ret, 0, size);
 
-	return 1;
+	return ret;
 
 err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
+	return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev:	device from which we allocate memory
+ * @size:	size of requested memory area
+ * @dma_handle:	This will be filled with the correct dma handle
+ * @ret:	This pointer will be filled with the virtual address
+ *		to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+		dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	if (!mem)
+		return 0;
+
+	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+	if (*ret)
+		return 1;
+
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev:	device from which the memory was allocated
- * @order:	the order of pages allocated
- * @vaddr:	virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+	if (!dma_coherent_default_memory)
+		return NULL;
+
+	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+			dma_handle);
+}
 
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+				       int order, void *vaddr)
+{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
	}
	return 0;
 }
-EXPORT_SYMBOL(dma_release_from_coherent);
 
 /**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
  * @dev:	device from which the memory was allocated
- * @vma:	vm_area for the userspace memory
- * @vaddr:	cpu address returned by dma_alloc_from_coherent
- * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret:	result from remap_pfn_range()
+ * @order:	the order of pages allocated
+ * @vaddr:	virtual address of allocated pages
  *
  * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
  *
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
  */
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
-			   void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 {
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 
+	return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_release_from_coherent(dma_coherent_default_memory, order,
+			vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
	}
	return 0;
 }
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev:	device from which the memory was allocated
+ * @vma:	vm_area for the userspace memory
+ * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
+ * @size:	size of the memory buffer allocated
+ * @ret:	result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+			   void *vaddr, size_t size, int *ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+				   size_t size, int *ret)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+					vaddr, size, ret);
+}
 
 /*
  * Support for reserved memory regions defined in device tree
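
Not part of the diff above: a minimal caller-side sketch of how the new
interface is intended to be used from an arch-level dma_alloc_coherent()
path, following the kernel-doc return convention ("0 = keep allocating from
generic memory, !0 = return *ret"). arch_dma_alloc() and its fallback
allocation are hypothetical placeholders; only dma_alloc_from_dev_coherent()
and its semantics come from this patch.

/* Sketch only -- not part of this patch. */
static void *arch_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* Try the per-device coherent pool first, if one was declared. */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
		return ret;	/* pool hit, or pool marked DMA_MEMORY_EXCLUSIVE */

	/* Otherwise fall back to a (simplified) generic allocation. */
	ret = (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
	if (ret)
		*dma_handle = virt_to_phys(ret);
	return ret;
}

Callers that have no struct device can instead use the new
dma_alloc_from_global_coherent(), which returns the virtual address from the
default pool directly (or NULL), with dma_release_from_global_coherent() and
dma_mmap_from_global_coherent() as the matching release and mmap helpers.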