@@ -147,8 +147,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
-	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
-			     buf->dma_dir, &attrs) == 0)
+	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				      buf->dma_dir, &attrs);
+	if (!sgt->nents)
 		goto fail_map;
 
 	buf->handler.refcount = &buf->refcount;
@@ -187,7 +188,7 @@ static void vb2_dma_sg_put(void *buf_priv)
 		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
-		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
 				   buf->dma_dir, &attrs);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
@@ -316,9 +317,11 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
-	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
-			     buf->dma_dir, &attrs) == 0)
+	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				      buf->dma_dir, &attrs);
+	if (!sgt->nents)
 		goto userptr_fail_map;
+
 	return buf;
 
 userptr_fail_map:
@@ -355,7 +358,8 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 		__func__, buf->num_pages);
-	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
+			   &attrs);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
@@ -508,7 +512,6 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
 	/* stealing dmabuf mutex to serialize map/unmap operations */
 	struct mutex *lock = &db_attach->dmabuf->lock;
 	struct sg_table *sgt;
-	int ret;
 
 	mutex_lock(lock);
 
@@ -527,8 +530,9 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
 	}
 
 	/* mapping to the client with new direction */
-	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
-	if (ret <= 0) {
+	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+				dma_dir);
+	if (!sgt->nents) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
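
Note (not part of the patch): dma_map_sg() and dma_map_sg_attrs() may coalesce
scatterlist entries, so their return value, the number of DMA segments actually
mapped, can be smaller than the entry count passed in. The fix stores that
return value in sgt->nents while consistently passing sgt->orig_nents to both
the map and unmap calls, since dma_unmap_sg_attrs() must be given the same
entry count the mapping call received. A minimal sketch of the pattern,
assuming an sg_table already populated via sg_alloc_table(); the helper names
are illustrative:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map: pass the CPU-side entry count, store the mapped segment count. */
static int sketch_map(struct device *dev, struct sg_table *sgt,
		      enum dma_data_direction dir)
{
	sgt->nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (!sgt->nents)
		return -EIO;
	return 0;
}

/* Unmap: always use the original entry count, never the mapped one. */
static void sketch_unmap(struct device *dev, struct sg_table *sgt,
			 enum dma_data_direction dir)
{
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
}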