@@ -26,6 +26,9 @@
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
+
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -41,8 +44,7 @@ static bool use_pages(struct drm_gem_object *obj)
 }
 
 /* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj,
-		int npages)
+static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_drm_private *priv = obj->dev->dev_private;
@@ -54,7 +56,9 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
+	spin_lock(&priv->vram.lock);
 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
+	spin_unlock(&priv->vram.lock);
 	if (ret) {
 		kvfree(p);
 		return ERR_PTR(ret);
@@ -69,7 +73,6 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	return p;
 }
 
-/* called with dev->struct_mutex held */
 static struct page **get_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -109,6 +112,18 @@ static struct page **get_pages(struct drm_gem_object *obj)
 	return msm_obj->pages;
 }
 
+static void put_pages_vram(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+
+	spin_lock(&priv->vram.lock);
+	drm_mm_remove_node(msm_obj->vram_node);
+	spin_unlock(&priv->vram.lock);
+
+	kvfree(msm_obj->pages);
+}
+
 static void put_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -125,10 +140,8 @@ static void put_pages(struct drm_gem_object *obj)
 
 		if (use_pages(obj))
 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
-		else {
-			drm_mm_remove_node(msm_obj->vram_node);
-			kvfree(msm_obj->pages);
-		}
+		else
+			put_pages_vram(obj);
 
 		msm_obj->pages = NULL;
 	}
@@ -136,11 +149,18 @@ static void put_pages(struct drm_gem_object *obj)
 
 struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **p;
-	mutex_lock(&dev->struct_mutex);
+
+	mutex_lock(&msm_obj->lock);
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return ERR_PTR(-EBUSY);
+	}
+
 	p = get_pages(obj);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return p;
 }
 
@@ -195,28 +215,25 @@ int msm_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **pages;
 	unsigned long pfn;
 	pgoff_t pgoff;
 	int ret;
 
-	/* This should only happen if userspace tries to pass a mmap'd
-	 * but unfaulted gem bo vaddr into submit ioctl, triggering
-	 * a page fault while struct_mutex is already held. This is
-	 * not a valid use-case so just bail.
-	 */
-	if (priv->struct_mutex_task == current)
-		return VM_FAULT_SIGBUS;
-
-	/* Make sure we don't parallel update on a fault, nor move or remove
-	 * something from beneath our feet
+	/*
+	 * vm_ops.open/drm_gem_mmap_obj and close get and put
+	 * a reference on obj. So, we dont need to hold one here.
 	 */
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&msm_obj->lock);
 	if (ret)
 		goto out;
 
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return VM_FAULT_SIGBUS;
+	}
+
 	/* make sure we have pages attached now */
 	pages = get_pages(obj);
 	if (IS_ERR(pages)) {
@@ -235,7 +252,7 @@ int msm_gem_fault(struct vm_fault *vmf)
 	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out_unlock:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 out:
 	switch (ret) {
 	case -EAGAIN:
@@ -259,9 +276,10 @@ out:
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	/* Make it mmapable */
 	ret = drm_gem_create_mmap_offset(obj);
@@ -277,9 +295,11 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 {
 	uint64_t offset;
-	mutex_lock(&obj->dev->struct_mutex);
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	mutex_lock(&msm_obj->lock);
 	offset = mmap_offset(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	return offset;
 }
 
@@ -289,6 +309,8 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (!vma)
 		return ERR_PTR(-ENOMEM);
@@ -306,7 +328,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	list_for_each_entry(vma, &msm_obj->vmas, list) {
 		if (vma->aspace == aspace)
@@ -325,13 +347,14 @@ static void del_vma(struct msm_gem_vma *vma)
 	kfree(vma);
 }
 
+/* Called with msm_obj->lock locked */
 static void
 put_iova(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma, *tmp;
 
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
@@ -339,21 +362,20 @@ put_iova(struct drm_gem_object *obj)
 	}
 }
 
-/* should be called under struct_mutex.. although it can be called
- * from atomic context without struct_mutex to acquire an extra
- * iova ref if you know one is already held.
- *
- * That means when I do eventually need to add support for unpinning
- * the refcnt counter needs to be atomic_t.
- */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+/* get iova, taking a reference. Should have a matching put */
+int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;
 
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	mutex_lock(&msm_obj->lock);
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return -EBUSY;
+	}
 
 	vma = lookup_vma(obj, aspace);
 
@@ -377,24 +399,14 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 	}
 
 	*iova = vma->iova;
+
+	mutex_unlock(&msm_obj->lock);
 	return 0;
 
 fail:
 	del_vma(vma);
 
-	return ret;
-}
-
-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova)
-{
-	int ret;
-
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_get_iova_locked(obj, aspace, iova);
-	mutex_unlock(&obj->dev->struct_mutex);
-
+	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
 
@@ -404,11 +416,12 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
-	mutex_lock(&obj->dev->struct_mutex);
+	mutex_lock(&msm_obj->lock);
 	vma = lookup_vma(obj, aspace);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 	WARN_ON(!vma);
 
 	return vma ? vma->iova : 0;
@@ -455,45 +468,57 @@ fail:
 	return ret;
 }
 
-void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	int ret = 0;
+
+	mutex_lock(&msm_obj->lock);
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+		mutex_unlock(&msm_obj->lock);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* increment vmap_count *before* vmap() call, so shrinker can
+	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+	 * This guarantees that we won't try to msm_gem_vunmap() this
+	 * same object from within the vmap() call (while we already
+	 * hold msm_obj->lock)
+	 */
+	msm_obj->vmap_count++;
+
 	if (!msm_obj->vaddr) {
 		struct page **pages = get_pages(obj);
-		if (IS_ERR(pages))
-			return ERR_CAST(pages);
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto fail;
+		}
 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
-		if (msm_obj->vaddr == NULL)
-			return ERR_PTR(-ENOMEM);
+		if (msm_obj->vaddr == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
 	}
-	msm_obj->vmap_count++;
+
+	mutex_unlock(&msm_obj->lock);
 	return msm_obj->vaddr;
-}
 
-void *msm_gem_get_vaddr(struct drm_gem_object *obj)
-{
-	void *ret;
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_get_vaddr_locked(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
-	return ret;
+fail:
+	msm_obj->vmap_count--;
+	mutex_unlock(&msm_obj->lock);
+	return ERR_PTR(ret);
 }
 
-void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+	mutex_lock(&msm_obj->lock);
 	WARN_ON(msm_obj->vmap_count < 1);
 	msm_obj->vmap_count--;
-}
-
-void msm_gem_put_vaddr(struct drm_gem_object *obj)
-{
-	mutex_lock(&obj->dev->struct_mutex);
-	msm_gem_put_vaddr_locked(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&msm_obj->lock);
 }
 
 /* Update madvise status, returns true if not purged, else
@@ -503,15 +528,21 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+	mutex_lock(&msm_obj->lock);
+
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
 	if (msm_obj->madv != __MSM_MADV_PURGED)
 		msm_obj->madv = madv;
 
-	return (msm_obj->madv != __MSM_MADV_PURGED);
+	madv = msm_obj->madv;
+
+	mutex_unlock(&msm_obj->lock);
+
+	return (madv != __MSM_MADV_PURGED);
 }
 
-void msm_gem_purge(struct drm_gem_object *obj)
+void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -520,9 +551,11 @@ void msm_gem_purge(struct drm_gem_object *obj)
 	WARN_ON(!is_purgeable(msm_obj));
 	WARN_ON(obj->import_attach);
 
+	mutex_lock_nested(&msm_obj->lock, subclass);
+
 	put_iova(obj);
 
-	msm_gem_vunmap(obj);
+	msm_gem_vunmap_locked(obj);
 
 	put_pages(obj);
 
@@ -540,12 +573,16 @@ void msm_gem_purge(struct drm_gem_object *obj)
 
 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 			0, (loff_t)-1);
+
+	mutex_unlock(&msm_obj->lock);
 }
 
-void msm_gem_vunmap(struct drm_gem_object *obj)
+static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 		return;
 
@@ -553,6 +590,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
 	msm_obj->vaddr = NULL;
 }
 
+void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	mutex_lock_nested(&msm_obj->lock, subclass);
+	msm_gem_vunmap_locked(obj);
+	mutex_unlock(&msm_obj->lock);
+}
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
@@ -674,7 +720,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
 
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	mutex_lock(&msm_obj->lock);
 
 	switch (msm_obj->madv) {
 	case __MSM_MADV_PURGED:
@@ -715,6 +761,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	if (fence)
 		describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
+
+	mutex_unlock(&msm_obj->lock);
 }
 
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -747,6 +795,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 	list_del(&msm_obj->mm_list);
 
+	mutex_lock(&msm_obj->lock);
+
 	put_iova(obj);
 
 	if (obj->import_attach) {
@@ -761,7 +811,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 		drm_prime_gem_destroy(obj, msm_obj->sgt);
 	} else {
-		msm_gem_vunmap(obj);
+		msm_gem_vunmap_locked(obj);
 		put_pages(obj);
 	}
 
@@ -770,6 +820,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 	drm_gem_object_release(obj);
 
+	mutex_unlock(&msm_obj->lock);
 	kfree(msm_obj);
 }
 
@@ -780,14 +831,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	struct drm_gem_object *obj;
 	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
 	obj = msm_gem_new(dev, size, flags);
 
-	mutex_unlock(&dev->struct_mutex);
-
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -802,13 +847,12 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 static int msm_gem_new_impl(struct drm_device *dev,
 		uint32_t size, uint32_t flags,
 		struct reservation_object *resv,
-		struct drm_gem_object **obj)
+		struct drm_gem_object **obj,
+		bool struct_mutex_locked)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
 	case MSM_BO_CACHED:
@@ -824,6 +868,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	if (!msm_obj)
 		return -ENOMEM;
 
+	mutex_init(&msm_obj->lock);
+
 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;
 
@@ -837,23 +883,28 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->vmas);
 
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	if (struct_mutex_locked) {
+		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+		mutex_unlock(&dev->struct_mutex);
+	}
 
 	*obj = &msm_obj->base;
 
 	return 0;
 }
 
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
-		uint32_t size, uint32_t flags)
+static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags, bool struct_mutex_locked)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_gem_object *obj = NULL;
 	bool use_vram = false;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	size = PAGE_ALIGN(size);
 
 	if (!iommu_present(&platform_bus_type))
@@ -870,7 +921,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	if (size == 0)
 		return ERR_PTR(-EINVAL);
 
-	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
+	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
 	if (ret)
 		goto fail;
 
@@ -904,10 +955,22 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	return obj;
 
 fail:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference_unlocked(obj);
 	return ERR_PTR(ret);
 }
 
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	return _msm_gem_new(dev, size, flags, true);
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	return _msm_gem_new(dev, size, flags, false);
+}
+
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt)
 {
@@ -924,11 +987,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
 	size = PAGE_ALIGN(dmabuf->size);
 
-	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
-	mutex_lock(&dev->struct_mutex);
-	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
-	mutex_unlock(&dev->struct_mutex);
-
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
 	if (ret)
 		goto fail;
 
@@ -937,17 +996,22 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	npages = size / PAGE_SIZE;
 
 	msm_obj = to_msm_bo(obj);
+	mutex_lock(&msm_obj->lock);
 	msm_obj->sgt = sgt;
 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!msm_obj->pages) {
+		mutex_unlock(&msm_obj->lock);
 		ret = -ENOMEM;
 		goto fail;
 	}
 
 	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&msm_obj->lock);
 		goto fail;
+	}
 
+	mutex_unlock(&msm_obj->lock);
 	return obj;
 
 fail: