@@ -162,6 +162,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	buffer->dev = dev;
 	buffer->size = len;
 	INIT_LIST_HEAD(&buffer->vmas);
+	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
 	/*
 	 * this will set up dma addresses for the sglist -- it is not
@@ -796,10 +797,6 @@ void ion_client_destroy(struct ion_client *client)
 }
 EXPORT_SYMBOL(ion_client_destroy);
 
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
-				       struct device *dev,
-				       enum dma_data_direction direction);
-
 static struct sg_table *dup_sg_table(struct sg_table *table)
 {
 	struct sg_table *new_table;
@@ -826,22 +823,89 @@ static struct sg_table *dup_sg_table(struct sg_table *table)
 	return new_table;
 }
 
+static void free_duped_table(struct sg_table *table)
+{
+	sg_free_table(table);
+	kfree(table);
+}
+
+struct ion_dma_buf_attachment {
+	struct device *dev;
+	struct sg_table *table;
+	struct list_head list;
+};
+
+static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
+			      struct dma_buf_attachment *attachment)
+{
+	struct ion_dma_buf_attachment *a;
+	struct sg_table *table;
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	table = dup_sg_table(buffer->sg_table);
+	if (IS_ERR(table)) {
+		kfree(a);
+		return -ENOMEM;
+	}
+
+	a->table = table;
+	a->dev = dev;
+	INIT_LIST_HEAD(&a->list);
+
+	attachment->priv = a;
+
+	mutex_lock(&buffer->lock);
+	list_add(&a->list, &buffer->attachments);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void ion_dma_buf_detach(struct dma_buf *dmabuf,
+			       struct dma_buf_attachment *attachment)
+{
+	struct ion_dma_buf_attachment *a = attachment->priv;
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	free_duped_table(a->table);
+	mutex_lock(&buffer->lock);
+	list_del(&a->list);
+	mutex_unlock(&buffer->lock);
+
+	kfree(a);
+}
+
+
 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
 					enum dma_data_direction direction)
 {
-	struct dma_buf *dmabuf = attachment->dmabuf;
-	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_dma_buf_attachment *a = attachment->priv;
+	struct sg_table *table;
+	int ret;
+
+	table = a->table;
 
-	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
-	return dup_sg_table(buffer->sg_table);
+	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
+			direction)) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	return table;
+
+err:
+	free_duped_table(table);
+	return ERR_PTR(ret);
 }
 
 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 			      struct sg_table *table,
 			      enum dma_data_direction direction)
 {
-	sg_free_table(table);
-	kfree(table);
+	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
 }
 
 void ion_pages_sync_for_device(struct device *dev, struct page *page,
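
With this hunk each attachment owns a private copy of the buffer's sg_table, created at attach time and mapped with dma_map_sg() in ion_map_dma_buf(); dma_map_sg() returns 0 on failure, which the code turns into -ENOMEM. These callbacks are only ever reached through the dma-buf core. A minimal importer-side sketch of that path, assuming a hypothetical importer_dev and an already-exported dmabuf (dma_buf_attach(), dma_buf_map_attachment() and friends are the real core entry points):

	#include <linux/dma-buf.h>

	/* Sketch only: exercises the new per-attachment mapping path. */
	static int example_importer_map(struct device *importer_dev,
					struct dma_buf *dmabuf)
	{
		struct dma_buf_attachment *att;
		struct sg_table *sgt;

		att = dma_buf_attach(dmabuf, importer_dev); /* -> ion_dma_buf_attach() */
		if (IS_ERR(att))
			return PTR_ERR(att);

		/* -> ion_map_dma_buf(): dma_map_sg() on this attachment's table */
		sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt)) {
			dma_buf_detach(dmabuf, att); /* -> ion_dma_buf_detach() */
			return PTR_ERR(sgt);
		}

		/* ... program the device using sgt ... */

		dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL); /* -> ion_unmap_dma_buf() */
		dma_buf_detach(dmabuf, att);
		return 0;
	}
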
@@ -865,38 +929,6 @@ struct ion_vma_list {
 	struct vm_area_struct *vma;
 };
 
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
-				       struct device *dev,
-				       enum dma_data_direction dir)
-{
-	struct ion_vma_list *vma_list;
-	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-	int i;
-
-	pr_debug("%s: syncing for device %s\n", __func__,
-		 dev ? dev_name(dev) : "null");
-
-	if (!ion_buffer_fault_user_mappings(buffer))
-		return;
-
-	mutex_lock(&buffer->lock);
-	for (i = 0; i < pages; i++) {
-		struct page *page = buffer->pages[i];
-
-		if (ion_buffer_page_is_dirty(page))
-			ion_pages_sync_for_device(dev, ion_buffer_page(page),
-						  PAGE_SIZE, dir);
-
-		ion_buffer_page_clean(buffer->pages + i);
-	}
-	list_for_each_entry(vma_list, &buffer->vmas, list) {
-		struct vm_area_struct *vma = vma_list->vma;
-
-		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-	}
-	mutex_unlock(&buffer->lock);
-}
-
 static int ion_vm_fault(struct vm_fault *vmf)
 {
 	struct ion_buffer *buffer = vmf->vma->vm_private_data;
@@ -1014,26 +1046,45 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 {
 	struct ion_buffer *buffer = dmabuf->priv;
 	void *vaddr;
+	struct ion_dma_buf_attachment *a;
 
-	if (!buffer->heap->ops->map_kernel) {
-		pr_err("%s: map kernel is not implemented by this heap.\n",
-		       __func__);
-		return -ENODEV;
+	/*
+	 * TODO: Move this elsewhere because we don't always need a vaddr
+	 */
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		vaddr = ion_buffer_kmap_get(buffer);
+		mutex_unlock(&buffer->lock);
 	}
 
+
 	mutex_lock(&buffer->lock);
-	vaddr = ion_buffer_kmap_get(buffer);
+	list_for_each_entry(a, &buffer->attachments, list) {
+		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
+				    DMA_BIDIRECTIONAL);
+	}
 	mutex_unlock(&buffer->lock);
-	return PTR_ERR_OR_ZERO(vaddr);
+
+	return 0;
 }
 
 static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 				      enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_dma_buf_attachment *a;
+
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		ion_buffer_kmap_put(buffer);
+		mutex_unlock(&buffer->lock);
+	}
 
 	mutex_lock(&buffer->lock);
-	ion_buffer_kmap_put(buffer);
+	list_for_each_entry(a, &buffer->attachments, list) {
+		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
+				       DMA_BIDIRECTIONAL);
+	}
 	mutex_unlock(&buffer->lock);
 
 	return 0;
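
Both CPU-access hooks now walk the attachment list and call dma_sync_sg_for_cpu()/dma_sync_sg_for_device() on every attached device's mapping, replacing the removed ion_buffer_sync_for_device() path; note that they ignore the direction argument and always sync DMA_BIDIRECTIONAL. Userspace reaches these hooks through the standard DMA_BUF_IOCTL_SYNC ioctl on the exported fd; a short sketch, assuming fd is a dma-buf file descriptor handed out by ION:

	#include <linux/dma-buf.h>
	#include <sys/ioctl.h>

	struct dma_buf_sync sync = { 0 };

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW; /* -> begin_cpu_access */
	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);

	/* ... CPU reads/writes through an mmap() of fd ... */

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW; /* -> end_cpu_access */
	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
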
@@ -1044,6 +1095,8 @@ static const struct dma_buf_ops dma_buf_ops = {
 	.unmap_dma_buf = ion_unmap_dma_buf,
 	.mmap = ion_mmap,
 	.release = ion_dma_buf_release,
+	.attach = ion_dma_buf_attach,
+	.detach = ion_dma_buf_detach,
 	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
 	.end_cpu_access = ion_dma_buf_end_cpu_access,
 	.kmap_atomic = ion_dma_buf_kmap,
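
For context, this extended ops table is what ION hands to the dma-buf core at export time; ion_share_dma_buf() in the same file does roughly the following (paraphrased from the surrounding code, not part of this patch):

	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;

	exp_info.ops = &dma_buf_ops;	/* the table extended above */
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);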