
Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next

Highlights:
- Cooling device support from Russell, to allow GPU throttling on system
thermal overload.
- Explicit fencing support from Philipp, implemented in a similar way to
drm/msm.

* 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux:
  drm/etnaviv: submit support for out-fences
  drm/etnaviv: return GPU fence through the submit structure
  drm/etnaviv: submit support for in-fences
  drm/etnaviv: add etnaviv cooling device
  drm/etnaviv: switch to postclose
  drm/etnaviv: add lockdep assert to fence allocation
Dave Airlie 8 years ago
parent
commit
aed93ee7d0

+ 1 - 0
drivers/gpu/drm/etnaviv/Kconfig

@@ -5,6 +5,7 @@ config DRM_ETNAVIV
 	depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
 	depends on MMU
 	select SHMEM
+	select SYNC_FILE
 	select TMPFS
 	select IOMMU_API
 	select IOMMU_SUPPORT

+ 3 - 3
drivers/gpu/drm/etnaviv/etnaviv_drv.c

@@ -111,7 +111,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 	return 0;
 }
 
-static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
+static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct etnaviv_drm_private *priv = dev->dev_private;
 	struct etnaviv_file_private *ctx = file->driver_priv;
@@ -488,7 +488,7 @@ static struct drm_driver etnaviv_drm_driver = {
 				DRIVER_PRIME |
 				DRIVER_RENDER,
 	.open               = etnaviv_open,
-	.preclose           = etnaviv_preclose,
+	.postclose          = etnaviv_postclose,
 	.gem_free_object_unlocked = etnaviv_gem_free_object,
 	.gem_vm_ops         = &vm_ops,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -512,7 +512,7 @@ static struct drm_driver etnaviv_drm_driver = {
 	.desc               = "etnaviv DRM",
 	.date               = "20151214",
 	.major              = 1,
-	.minor              = 0,
+	.minor              = 1,
 };
 
 /*

+ 3 - 1
drivers/gpu/drm/etnaviv/etnaviv_gem.h

@@ -20,6 +20,7 @@
 #include <linux/reservation.h>
 #include "etnaviv_drv.h"
 
+struct dma_fence;
 struct etnaviv_gem_ops;
 struct etnaviv_gem_object;
 
@@ -104,9 +105,10 @@ struct etnaviv_gem_submit {
 	struct drm_device *dev;
 	struct etnaviv_gpu *gpu;
 	struct ww_acquire_ctx ticket;
-	u32 fence;
+	struct dma_fence *fence;
+	u32 flags;
 	unsigned int nr_bos;
 	struct etnaviv_gem_submit_bo bos[0];
 };
 
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,

+ 63 - 2
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c

@@ -14,7 +14,9 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/dma-fence-array.h>
 #include <linux/reservation.h>
+#include <linux/sync_file.h>
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gpu.h"
@@ -169,8 +171,10 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
 		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+		bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
 
-		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
+		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
+						 explicit);
 		if (ret)
 			break;
 	}
@@ -290,6 +294,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
 	}
 
 	ww_acquire_fini(&submit->ticket);
+	dma_fence_put(submit->fence);
 	kfree(submit);
 }
 
@@ -303,6 +308,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct etnaviv_gem_submit *submit;
 	struct etnaviv_cmdbuf *cmdbuf;
 	struct etnaviv_gpu *gpu;
+	struct dma_fence *in_fence = NULL;
+	struct sync_file *sync_file = NULL;
+	int out_fence_fd = -1;
 	void *stream;
 	int ret;
 
@@ -326,6 +334,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
+		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
+		return -EINVAL;
+	}
+
 	/*
 	 * Copy the command submission and bo array to kernel space in
 	 * one go, and do this outside of any locks.
@@ -365,12 +378,22 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		goto err_submit_cmds;
 	}
 
+	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+		if (out_fence_fd < 0) {
+			ret = out_fence_fd;
+			goto err_submit_cmds;
+		}
+	}
+
 	submit = submit_create(dev, gpu, args->nr_bos);
 	if (!submit) {
 		ret = -ENOMEM;
 		goto err_submit_cmds;
 	}
 
+	submit->flags = args->flags;
+
 	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
 	if (ret)
 		goto err_submit_objects;
@@ -385,6 +408,24 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		goto err_submit_objects;
 	}
 
+	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
+		in_fence = sync_file_get_fence(args->fence_fd);
+		if (!in_fence) {
+			ret = -EINVAL;
+			goto err_submit_objects;
+		}
+
+		/*
+		 * Wait if the fence is from a foreign context, or if the fence
+		 * array contains any fence from a foreign context.
+		 */
+		if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
+			ret = dma_fence_wait(in_fence, true);
+			if (ret)
+				goto err_submit_objects;
+		}
+	}
+
 	ret = submit_fence_sync(submit);
 	if (ret)
 		goto err_submit_objects;
@@ -405,7 +446,23 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret == 0)
 		cmdbuf = NULL;
 
-	args->fence = submit->fence;
+	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+		/*
+		 * This can be improved: ideally we want to allocate the sync
+		 * file before kicking off the GPU job and just attach the
+		 * fence to the sync file here, eliminating the ENOMEM
+		 * possibility at this stage.
+		 */
+		sync_file = sync_file_create(submit->fence);
+		if (!sync_file) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		fd_install(out_fence_fd, sync_file->file);
+	}
+
+	args->fence_fd = out_fence_fd;
+	args->fence = submit->fence->seqno;
 
 out:
 	submit_unpin_objects(submit);
@@ -419,9 +476,13 @@ out:
 		flush_workqueue(priv->wq);
 
 err_submit_objects:
+	if (in_fence)
+		dma_fence_put(in_fence);
 	submit_cleanup(submit);
 
 err_submit_cmds:
+	if (ret && (out_fence_fd >= 0))
+		put_unused_fd(out_fence_fd);
 	/* if we still own the cmdbuf */
 	if (cmdbuf)
 		etnaviv_cmdbuf_free(cmdbuf);
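
For context, a minimal userspace sketch of the new out-fence path could look like the following. It is an illustration only, not part of this series: it assumes libdrm's drmIoctl() wrapper and a struct drm_etnaviv_gem_submit already populated with bos/relocs/stream elsewhere; the helper name submit_and_wait() is made up.

#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <xf86drm.h>
#include "etnaviv_drm.h"

/* Hypothetical helper: request an out-fence fd from the submit ioctl and
 * block until the GPU fence behind it signals.
 */
static int submit_and_wait(int drm_fd, struct drm_etnaviv_gem_submit *req)
{
	struct pollfd pfd;
	int ret;

	req->flags |= ETNA_SUBMIT_FENCE_FD_OUT;

	if (drmIoctl(drm_fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, req))
		return -errno;

	/* The returned sync_file fd reports POLLIN once the fence signals. */
	pfd.fd = req->fence_fd;
	pfd.events = POLLIN;
	ret = poll(&pfd, 1, -1);

	close(req->fence_fd);
	return ret < 0 ? -errno : 0;
}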

+ 81 - 18
drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -18,6 +18,7 @@
 #include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
+#include <linux/thermal.h>
 
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
@@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
 	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 }
 
+static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
+{
+	unsigned int fscale = 1 << (6 - gpu->freq_scale);
+	u32 clock;
+
+	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+
+	etnaviv_gpu_load_clock(gpu, clock);
+}
+
 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 {
 	u32 control, idle;
@@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	timeout = jiffies + msecs_to_jiffies(1000);
 
 	while (time_is_after_jiffies(timeout)) {
-		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
 		/* enable clock */
-		etnaviv_gpu_load_clock(gpu, control);
+		etnaviv_gpu_update_clock(gpu);
+
+		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 
 		/* Wait for stable clock.  Vivante's code waited for 1ms */
 		usleep_range(1000, 10000);
@@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	}
 
 	/* We rely on the GPU running, so program the clock */
-	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-	/* enable clock */
-	etnaviv_gpu_load_clock(gpu, control);
+	etnaviv_gpu_update_clock(gpu);
 
 	return 0;
 }
@@ -1051,6 +1058,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_fence *f;
 
+	/*
+	 * GPU lock must already be held, otherwise fence completion order might
+	 * not match the seqno order assigned here.
+	 */
+	lockdep_assert_held(&gpu->lock);
+
 	f = kzalloc(sizeof(*f), GFP_KERNEL);
 	if (!f)
 		return NULL;
@@ -1064,7 +1077,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 }
 
 int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-	unsigned int context, bool exclusive)
+	unsigned int context, bool exclusive, bool explicit)
 {
 	struct reservation_object *robj = etnaviv_obj->resv;
 	struct reservation_object_list *fobj;
@@ -1077,6 +1090,9 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 			return ret;
 	}
 
+	if (explicit)
+		return 0;
+
 	/*
 	 * If we have any shared fences, then the exclusive fence
 	 * should be ignored as it will already have been signalled.
@@ -1321,8 +1337,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	mutex_lock(&gpu->lock);
 
 	gpu->event[event].fence = fence;
-	submit->fence = fence->seqno;
-	gpu->active_fence = submit->fence;
+	submit->fence = dma_fence_get(fence);
+	gpu->active_fence = submit->fence->seqno;
 
 	if (gpu->lastctx != cmdbuf->ctx) {
 		gpu->mmu->need_flush = true;
@@ -1526,17 +1542,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 #ifdef CONFIG_PM
 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 {
-	u32 clock;
 	int ret;
 
 	ret = mutex_lock_killable(&gpu->lock);
 	if (ret)
 		return ret;
 
-	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-	etnaviv_gpu_load_clock(gpu, clock);
+	etnaviv_gpu_update_clock(gpu);
 	etnaviv_gpu_hw_init(gpu);
 
 	gpu->switch_context = true;
@@ -1548,6 +1560,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 }
 #endif
 
+static int
+etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
+				  unsigned long *state)
+{
+	*state = 6;
+
+	return 0;
+}
+
+static int
+etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+				  unsigned long *state)
+{
+	struct etnaviv_gpu *gpu = cdev->devdata;
+
+	*state = gpu->freq_scale;
+
+	return 0;
+}
+
+static int
+etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+				  unsigned long state)
+{
+	struct etnaviv_gpu *gpu = cdev->devdata;
+
+	mutex_lock(&gpu->lock);
+	gpu->freq_scale = state;
+	if (!pm_runtime_suspended(gpu->dev))
+		etnaviv_gpu_update_clock(gpu);
+	mutex_unlock(&gpu->lock);
+
+	return 0;
+}
+
+static struct thermal_cooling_device_ops cooling_ops = {
+	.get_max_state = etnaviv_gpu_cooling_get_max_state,
+	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
+	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
+};
+
 static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	void *data)
 {
@@ -1556,13 +1609,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
 	int ret;
 
+	gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+				(char *)dev_name(dev), gpu, &cooling_ops);
+	if (IS_ERR(gpu->cooling))
+		return PTR_ERR(gpu->cooling);
+
 #ifdef CONFIG_PM
 	ret = pm_runtime_get_sync(gpu->dev);
 #else
 	ret = etnaviv_gpu_clk_enable(gpu);
 #endif
-	if (ret < 0)
+	if (ret < 0) {
+		thermal_cooling_device_unregister(gpu->cooling);
 		return ret;
+	}
 
 	gpu->drm = drm;
 	gpu->fence_context = dma_fence_context_alloc(1);
@@ -1616,6 +1676,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 	}
 
 	gpu->drm = NULL;
+
+	thermal_cooling_device_unregister(gpu->cooling);
+	gpu->cooling = NULL;
 }
 
 static const struct component_ops gpu_ops = {
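
As a side note on the cooling device: each cooling state scales the core clock through the FSCALE field via fscale = 1 << (6 - state), so state 0 keeps the previous full-speed value of 0x40 while state 6 throttles the GPU to roughly 1/64 of its clock (assuming FSCALE acts as a fraction of 0x40). A standalone snippet, illustration only and not driver code, that prints the mapping:

#include <stdio.h>

int main(void)
{
	/* Mirror etnaviv_gpu_update_clock(): state 0 = full speed (0x40),
	 * each further state halves the scale factor, down to 1/64.
	 */
	for (unsigned int state = 0; state <= 6; state++) {
		unsigned int fscale = 1 << (6 - state);

		printf("cooling state %u -> FSCALE 0x%02x (~%u/64 of core clock)\n",
		       state, fscale, fscale);
	}
	return 0;
}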

+ 3 - 1
drivers/gpu/drm/etnaviv/etnaviv_gpu.h

@@ -97,6 +97,7 @@ struct etnaviv_cmdbuf;
 
 struct etnaviv_gpu {
 	struct drm_device *drm;
+	struct thermal_cooling_device *cooling;
 	struct device *dev;
 	struct mutex lock;
 	struct etnaviv_chip_identity identity;
@@ -150,6 +151,7 @@ struct etnaviv_gpu {
 	u32 hangcheck_fence;
 	u32 hangcheck_dma_addr;
 	struct work_struct recover_work;
+	unsigned int freq_scale;
 };
 
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
@@ -181,7 +183,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
 #endif
 
 int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-	unsigned int context, bool exclusive);
+	unsigned int context, bool exclusive, bool explicit);
 
 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,

+ 8 - 0
include/uapi/drm/etnaviv_drm.h

@@ -154,6 +154,12 @@ struct drm_etnaviv_gem_submit_bo {
  * one or more cmdstream buffers.  This allows for conditional execution
  * (context-restore), and IB buffers needed for per tile/bin draw cmds.
  */
+#define ETNA_SUBMIT_NO_IMPLICIT         0x0001
+#define ETNA_SUBMIT_FENCE_FD_IN         0x0002
+#define ETNA_SUBMIT_FENCE_FD_OUT        0x0004
+#define ETNA_SUBMIT_FLAGS		(ETNA_SUBMIT_NO_IMPLICIT | \
+					 ETNA_SUBMIT_FENCE_FD_IN | \
+					 ETNA_SUBMIT_FENCE_FD_OUT)
 #define ETNA_PIPE_3D      0x00
 #define ETNA_PIPE_2D      0x01
 #define ETNA_PIPE_VG      0x02
@@ -167,6 +173,8 @@ struct drm_etnaviv_gem_submit {
 	__u64 bos;            /* in, ptr to array of submit_bo's */
 	__u64 relocs;         /* in, ptr to array of submit_reloc's */
 	__u64 stream;         /* in, ptr to cmdstream */
+	__u32 flags;          /* in, mask of ETNA_SUBMIT_x */
+	__s32 fence_fd;       /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
 };
 
 /* The normal way to synchronize with the GPU is just to CPU_PREP on
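
Putting the new UAPI pieces together, a hypothetical client could chain two submissions using explicit fences only, feeding the out-fence fd of the first submit back in as an in-fence of the second; the kernel then only blocks on the CPU if that fence comes from a foreign fence context. The sketch below assumes libdrm's drmIoctl() and two already-populated requests; the helper name submit_chained() is made up.

#include <errno.h>
#include <unistd.h>
#include <xf86drm.h>
#include "etnaviv_drm.h"

/* Hypothetical helper: make 'second' wait for 'first' through explicit
 * fencing instead of the implicit reservation-object fences.
 */
static int submit_chained(int drm_fd, struct drm_etnaviv_gem_submit *first,
			  struct drm_etnaviv_gem_submit *second)
{
	int ret;

	first->flags = ETNA_SUBMIT_FENCE_FD_OUT;
	if (drmIoctl(drm_fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, first))
		return -errno;

	second->flags = ETNA_SUBMIT_NO_IMPLICIT | ETNA_SUBMIT_FENCE_FD_IN;
	second->fence_fd = first->fence_fd;
	ret = drmIoctl(drm_fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, second) ? -errno : 0;

	close(first->fence_fd);
	return ret;
}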