
Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next

This cycle has been fairly calm in etnaviv land, with most of the action
happening on the userspace side.

Notable changes:
- Improvements to CONFIG option handling to make it harder for users to
shoot themselves in the foot due to kernel misconfiguration.
- Tweaked GEM object population so that userspace can react appropriately
when memory allocation fails, rather than waking the raging OOM killer
beast (see the sketch below).
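
As a rough illustration of that last point, here is a minimal, hypothetical
userspace sketch (not part of this series): an etnaviv client reacting to
-ENOMEM from the GEM_CPU_PREP ioctl, which now populates the object's pages
and can therefore report allocation failure instead of triggering an OOM
kill. The drop_local_bo_cache() helper and the retry policy are illustrative
assumptions, not real API.

    /*
     * Hypothetical userspace sketch: handle -ENOMEM from GEM_CPU_PREP
     * instead of relying on the OOM killer. drop_local_bo_cache() is an
     * application-specific placeholder, not part of any library API.
     */
    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>          /* drmIoctl() from libdrm */
    #include <drm/etnaviv_drm.h>  /* etnaviv UAPI structs and ioctl numbers */

    static void drop_local_bo_cache(void)
    {
        /* application-specific: release cached BOs, flush allocator pools */
    }

    static int prep_bo_for_cpu(int fd, uint32_t handle)
    {
        struct drm_etnaviv_gem_cpu_prep req = {
            .handle = handle,
            /* NOSYNC keeps the sketch simple: no implicit fence wait */
            .op = ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC,
        };

        if (drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &req) == 0)
            return 0;

        if (errno == ENOMEM) {
            /* page allocation failed; free some memory and retry once */
            drop_local_bo_cache();
            if (drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &req) == 0)
                return 0;
        }

        return -errno;
    }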

* 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux:
  drm/etnaviv: switch GEM allocations to __GFP_RETRY_MAYFAIL
  drm/etnaviv: don't fail GPU bind when CONFIG_THERMAL isn't enabled
  drm/etnaviv: switch to drm_*{get,put} helpers
  drm/etnaviv: select CMA and DMA_CMA if available
  drm/etnaviv: populate GEM objects on cpu_prep
  drm/etnaviv: reduce allocation failure message severity
  drm/etnaviv: don't trigger OOM killer when page allocation fails
Dave Airlie
commit e5fa05b96b
+ 2 - 0
drivers/gpu/drm/etnaviv/Kconfig

@@ -10,6 +10,8 @@ config DRM_ETNAVIV
 	select IOMMU_API
 	select IOMMU_SUPPORT
 	select WANT_DEV_COREDUMP
+	select CMA if HAVE_DMA_CONTIGUOUS
+	select DMA_CMA if HAVE_DMA_CONTIGUOUS
 	help
 	  DRM driver for Vivante GPUs.
 

+ 4 - 4
drivers/gpu/drm/etnaviv/etnaviv_drv.c

@@ -316,7 +316,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
 
 	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
 
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	return ret;
 }
@@ -337,7 +337,7 @@ static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
 
 	ret = etnaviv_gem_cpu_fini(obj);
 
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	return ret;
 }
@@ -357,7 +357,7 @@ static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	return ret;
 }
@@ -446,7 +446,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
 
 	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
 
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	return ret;
 }

+ 23 - 22
drivers/gpu/drm/etnaviv/etnaviv_gem.c

@@ -68,7 +68,7 @@ static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
 
 	if (IS_ERR(p)) {
-		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
+		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
 		return PTR_ERR(p);
 	}
 
@@ -265,7 +265,7 @@ void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
 {
 	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
 
-	drm_gem_object_reference(&etnaviv_obj->base);
+	drm_gem_object_get(&etnaviv_obj->base);
 
 	mutex_lock(&etnaviv_obj->lock);
 	WARN_ON(mapping->use == 0);
@@ -282,7 +282,7 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
 	mapping->use -= 1;
 	mutex_unlock(&etnaviv_obj->lock);
 
-	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+	drm_gem_object_put_unlocked(&etnaviv_obj->base);
 }
 
 struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
@@ -358,7 +358,7 @@ out:
 		return ERR_PTR(ret);
 
 	/* Take a reference on the object */
-	drm_gem_object_reference(obj);
+	drm_gem_object_get(obj);
 	return mapping;
 }
 
@@ -413,6 +413,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	bool write = !!(op & ETNA_PREP_WRITE);
 	int ret;
 
+	if (!etnaviv_obj->sgt) {
+		void *ret;
+
+		mutex_lock(&etnaviv_obj->lock);
+		ret = etnaviv_gem_get_pages(etnaviv_obj);
+		mutex_unlock(&etnaviv_obj->lock);
+		if (IS_ERR(ret))
+			return PTR_ERR(ret);
+	}
+
 	if (op & ETNA_PREP_NOSYNC) {
 		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
 							  write))
@@ -427,16 +437,6 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}
 
 	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
-		if (!etnaviv_obj->sgt) {
-			void *ret;
-
-			mutex_lock(&etnaviv_obj->lock);
-			ret = etnaviv_gem_get_pages(etnaviv_obj);
-			mutex_unlock(&etnaviv_obj->lock);
-			if (IS_ERR(ret))
-				return PTR_ERR(ret);
-		}
-
 		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
 				    etnaviv_obj->sgt->nents,
 				    etnaviv_op_to_dma_dir(op));
@@ -662,7 +662,8 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
 		 * going to pin these pages.
 		 */
 		mapping = obj->filp->f_mapping;
-		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+		mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
+				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	}
 
 	if (ret)
@@ -671,7 +672,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
 	return obj;
 
 fail:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 	return ERR_PTR(ret);
 }
 
@@ -688,14 +689,14 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 
 	ret = etnaviv_gem_obj_add(dev, obj);
 	if (ret < 0) {
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_put_unlocked(obj);
 		return ret;
 	}
 
 	ret = drm_gem_handle_create(file, obj, handle);
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	return ret;
 }
@@ -712,7 +713,7 @@ struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
 
 	ret = etnaviv_gem_obj_add(dev, obj);
 	if (ret < 0) {
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_put_unlocked(obj);
 		return ERR_PTR(ret);
 	}
 
@@ -800,7 +801,7 @@ static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
 	}
 
 	mutex_unlock(&etnaviv_obj->lock);
-	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+	drm_gem_object_put_unlocked(&etnaviv_obj->base);
 
 	mmput(work->mm);
 	put_task_struct(work->task);
@@ -858,7 +859,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 	}
 
 	get_task_struct(current);
-	drm_gem_object_reference(&etnaviv_obj->base);
+	drm_gem_object_get(&etnaviv_obj->base);
 
 	work->mm = mm;
 	work->task = current;
@@ -924,6 +925,6 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
 	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
 unreference:
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+	drm_gem_object_put_unlocked(&etnaviv_obj->base);
 	return ret;
 }

+ 1 - 1
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c

@@ -146,7 +146,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
 	return &etnaviv_obj->base;
 
 fail:
-	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+	drm_gem_object_put_unlocked(&etnaviv_obj->base);
 
 	return ERR_PTR(ret);
 }

+ 2 - 2
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c

@@ -88,7 +88,7 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
 		 * Take a refcount on the object. The file table lock
 		 * prevents the object_idr's refcount on this being dropped.
 		 */
-		drm_gem_object_reference(obj);
+		drm_gem_object_get(obj);
 
 		submit->bos[i].obj = to_etnaviv_bo(obj);
 	}
@@ -291,7 +291,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
 
 		submit_unlock_object(submit, i);
-		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+		drm_gem_object_put_unlocked(&etnaviv_obj->base);
 	}
 
 	ww_acquire_fini(&submit->ticket);

+ 5 - 3
drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -1622,10 +1622,12 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
 	int ret;
 
-	gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+	if (IS_ENABLED(CONFIG_THERMAL)) {
+		gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
 				(char *)dev_name(dev), gpu, &cooling_ops);
-	if (IS_ERR(gpu->cooling))
-		return PTR_ERR(gpu->cooling);
+		if (IS_ERR(gpu->cooling))
+			return PTR_ERR(gpu->cooling);
+	}
 
 #ifdef CONFIG_PM
 	ret = pm_runtime_get_sync(gpu->dev);