
Merge branch 'drm-vmwgfx-next' of git://people.freedesktop.org/~syeh/repos_linux into drm-next

vmwgfx add fence fd support.

* 'drm-vmwgfx-next' of git://people.freedesktop.org/~syeh/repos_linux:
  drm/vmwgfx: Bump the version for fence FD support
  drm/vmwgfx: Add export fence to file descriptor support
  drm/vmwgfx: Add support for imported Fence File Descriptor
  drm/vmwgfx: Prepare to support fence fd
  drm/vmwgfx: Fix incorrect command header offset at restart
  drm/vmwgfx: Support the NOP_ERROR command
  drm/vmwgfx: Restart command buffers after errors
  drm/vmwgfx: Move irq bottom half processing to threads
  drm/vmwgfx: Don't use drm_irq_[un]install
Dave Airlie, 8 years ago
commit 7846b12fe0
+ 178 - 64
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c

@@ -51,6 +51,7 @@ struct vmw_cmdbuf_context {
 	struct list_head hw_submitted;
 	struct list_head preempted;
 	unsigned num_hw_submitted;
+	bool block_submission;
 };
 
 /**
@@ -60,6 +61,9 @@ struct vmw_cmdbuf_context {
  * kernel command submissions, @cur.
  * @space_mutex: Mutex to protect against starvation when we allocate
  * main pool buffer space.
+ * @error_mutex: Mutex to serialize the work queue error handling.
+ * Note this is not needed if the same workqueue handler
+ * can't race with itself...
  * @work: A struct work_struct implementing command buffer error handling.
  * Immutable.
  * @dev_priv: Pointer to the device private struct. Immutable.
@@ -85,7 +89,6 @@ struct vmw_cmdbuf_context {
  * Internal protection.
  * @dheaders: Pool of DMA memory for device command buffer headers with trailing
  * space for inline data. Internal protection.
- * @tasklet: Tasklet struct for irq processing. Immutable.
  * @alloc_queue: Wait queue for processes waiting to allocate command buffer
  * space.
  * @idle_queue: Wait queue for processes waiting for command buffer idle.
@@ -102,6 +105,7 @@ struct vmw_cmdbuf_context {
 struct vmw_cmdbuf_man {
 	struct mutex cur_mutex;
 	struct mutex space_mutex;
+	struct mutex error_mutex;
 	struct work_struct work;
 	struct vmw_private *dev_priv;
 	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
@@ -117,7 +121,6 @@ struct vmw_cmdbuf_man {
 	spinlock_t lock;
 	struct dma_pool *headers;
 	struct dma_pool *dheaders;
-	struct tasklet_struct tasklet;
 	wait_queue_head_t alloc_queue;
 	wait_queue_head_t idle_queue;
 	bool irq_on;
@@ -181,12 +184,13 @@ struct vmw_cmdbuf_alloc_info {
 };
 
 /* Loop over each context in the command buffer manager. */
-#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+#define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
 	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
 	     ++(_i), ++(_ctx))
 
-static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
-
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
+				bool enable);
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
 
 /**
  * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
@@ -278,9 +282,9 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 		vmw_cmdbuf_header_inline_free(header);
 		return;
 	}
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	__vmw_cmdbuf_header_free(header);
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 }
 
 
@@ -331,7 +335,8 @@ static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
 				  struct vmw_cmdbuf_context *ctx)
 {
 	while (ctx->num_hw_submitted < man->max_hw_submitted &&
-	      !list_empty(&ctx->submitted)) {
+	       !list_empty(&ctx->submitted) &&
+	       !ctx->block_submission) {
 		struct vmw_cmdbuf_header *entry;
 		SVGACBStatus status;
 
@@ -386,12 +391,17 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
 			__vmw_cmdbuf_header_free(entry);
 			break;
 		case SVGA_CB_STATUS_COMMAND_ERROR:
-		case SVGA_CB_STATUS_CB_HEADER_ERROR:
+			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 			list_add_tail(&entry->list, &man->error);
 			schedule_work(&man->work);
 			break;
 		case SVGA_CB_STATUS_PREEMPTED:
-			list_add(&entry->list, &ctx->preempted);
+			entry->cb_header->status = SVGA_CB_STATUS_NONE;
+			list_add_tail(&entry->list, &ctx->preempted);
+			break;
+		case SVGA_CB_STATUS_CB_HEADER_ERROR:
+			WARN_ONCE(true, "Command buffer header error.\n");
+			__vmw_cmdbuf_header_free(entry);
 			break;
 		default:
 			WARN_ONCE(true, "Undefined command buffer status.\n");
@@ -468,20 +478,17 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 }
 
 /**
- * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
- * handler implemented as a tasklet.
+ * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
+ * handler implemented as a threaded irq task.
  *
- * @data: Tasklet closure. A pointer to the command buffer manager cast to
- * an unsigned long.
+ * @man: Pointer to the command buffer manager.
  *
- * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * The bottom half of the interrupt handler simply calls into the
  * command buffer processor to free finished buffers and submit any
  * queued buffers to hardware.
  */
-static void vmw_cmdbuf_man_tasklet(unsigned long data)
+void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
 {
-	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
-
 	spin_lock(&man->lock);
 	vmw_cmdbuf_man_process(man);
 	spin_unlock(&man->lock);
@@ -502,24 +509,112 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 		container_of(work, struct vmw_cmdbuf_man, work);
 	struct vmw_cmdbuf_header *entry, *next;
 	uint32_t dummy;
-	bool restart = false;
+	bool restart[SVGA_CB_CONTEXT_MAX];
+	bool send_fence = false;
+	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
+	int i;
+	struct vmw_cmdbuf_context *ctx;
 
-	spin_lock_bh(&man->lock);
+	for_each_cmdbuf_ctx(man, i, ctx) {
+		INIT_LIST_HEAD(&restart_head[i]);
+		restart[i] = false;
+	}
+
+	mutex_lock(&man->error_mutex);
+	spin_lock(&man->lock);
 	list_for_each_entry_safe(entry, next, &man->error, list) {
-		restart = true;
-		DRM_ERROR("Command buffer error.\n");
+		SVGACBHeader *cb_hdr = entry->cb_header;
+		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
+			(entry->cmd + cb_hdr->errorOffset);
+		u32 error_cmd_size, new_start_offset;
+		const char *cmd_name;
+
+		list_del_init(&entry->list);
+		restart[entry->cb_context] = true;
+
+		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
+			DRM_ERROR("Unknown command causing device error.\n");
+			DRM_ERROR("Command buffer offset is %lu\n",
+				  (unsigned long) cb_hdr->errorOffset);
+			__vmw_cmdbuf_header_free(entry);
+			send_fence = true;
+			continue;
+		}
 
-		list_del(&entry->list);
-		__vmw_cmdbuf_header_free(entry);
-		wake_up_all(&man->idle_queue);
+		DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
+		DRM_ERROR("Command buffer offset is %lu\n",
+			  (unsigned long) cb_hdr->errorOffset);
+		DRM_ERROR("Command size is %lu\n",
+			  (unsigned long) error_cmd_size);
+
+		new_start_offset = cb_hdr->errorOffset + error_cmd_size;
+
+		if (new_start_offset >= cb_hdr->length) {
+			__vmw_cmdbuf_header_free(entry);
+			send_fence = true;
+			continue;
+		}
+
+		if (man->using_mob)
+			cb_hdr->ptr.mob.mobOffset += new_start_offset;
+		else
+			cb_hdr->ptr.pa += (u64) new_start_offset;
+
+		entry->cmd += new_start_offset;
+		cb_hdr->length -= new_start_offset;
+		cb_hdr->errorOffset = 0;
+		cb_hdr->offset = 0;
+		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
+		man->ctx[entry->cb_context].block_submission = true;
+	}
+	spin_unlock(&man->lock);
+
+	/* Preempt all contexts with errors */
+	for_each_cmdbuf_ctx(man, i, ctx) {
+		if (ctx->block_submission && vmw_cmdbuf_preempt(man, i))
+			DRM_ERROR("Failed preempting command buffer "
+				  "context %u.\n", i);
+	}
+
+	spin_lock(&man->lock);
+	for_each_cmdbuf_ctx(man, i, ctx) {
+		if (!ctx->block_submission)
+			continue;
+
+		/* Move preempted command buffers to the preempted queue. */
+		vmw_cmdbuf_ctx_process(man, ctx, &dummy);
+
+		/*
+		 * Add the preempted queue after the command buffer
+		 * that caused an error.
+		 */
+		list_splice_init(&ctx->preempted, restart_head[i].prev);
+
+		/*
+		 * Finally add all command buffers first in the submitted
+		 * queue, to rerun them.
+		 */
+		list_splice_init(&restart_head[i], &ctx->submitted);
+
+		ctx->block_submission = false;
 	}
-	spin_unlock_bh(&man->lock);
 
-	if (restart && vmw_cmdbuf_startstop(man, true))
-		DRM_ERROR("Failed restarting command buffer context 0.\n");
+	vmw_cmdbuf_man_process(man);
+	spin_unlock(&man->lock);
+
+		if (restart[i] && vmw_cmdbuf_startstop(man, i, true))
+			DRM_ERROR("Failed restarting command buffer "
+				  "context %u.\n", i);
+	}
 
 
 	/* Send a new fence in case one was removed */
-	vmw_fifo_send_fence(man->dev_priv, &dummy);
+	if (send_fence) {
+		vmw_fifo_send_fence(man->dev_priv, &dummy);
+		wake_up_all(&man->idle_queue);
+	}
+
+	mutex_unlock(&man->error_mutex);
 }
 
 /**
@@ -536,7 +631,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 	bool idle = false;
 	int i;
 
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	vmw_cmdbuf_man_process(man);
 	for_each_cmdbuf_ctx(man, i, ctx) {
 		if (!list_empty(&ctx->submitted) ||
@@ -548,7 +643,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 	idle = list_empty(&man->error);
 
 out_unlock:
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 
 	return idle;
 }
@@ -571,7 +666,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 	if (!cur)
 		return;
 
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	if (man->cur_pos == 0) {
 		__vmw_cmdbuf_header_free(cur);
 		goto out_unlock;
@@ -580,7 +675,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 	man->cur->cb_header->length = man->cur_pos;
 	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
 out_unlock:
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 	man->cur = NULL;
 	man->cur_pos = 0;
 }
@@ -673,14 +768,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 		return true;
 
 	memset(info->node, 0, sizeof(*info->node));
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	if (ret) {
 		vmw_cmdbuf_man_process(man);
 		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	}
 
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 	info->done = !ret;
 
 	return info->done;
@@ -801,9 +896,9 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 	return 0;
 
 out_no_cb_header:
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	drm_mm_remove_node(&header->node);
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 
 	return ret;
 }
@@ -1023,18 +1118,6 @@ void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
 	vmw_cmdbuf_cur_unlock(man);
 }
 
-/**
- * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
- *
- * @man: The command buffer manager.
- */
-void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
-{
-	if (!man)
-		return;
-
-	tasklet_schedule(&man->tasklet);
-}
 
 /**
  * vmw_cmdbuf_send_device_command - Send a command through the device context.
@@ -1059,9 +1142,9 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
 	memcpy(cmd, command, size);
 	header->cb_header->length = size;
 	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	status = vmw_cmdbuf_header_submit(header);
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 	vmw_cmdbuf_header_free(header);
 
 	if (status != SVGA_CB_STATUS_COMPLETED) {
@@ -1073,6 +1156,29 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
 	return 0;
 }
 
+/**
+ * vmw_cmdbuf_preempt - Send a preempt command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ *
+ * Synchronously sends a preempt command.
+ */
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
+{
+	struct {
+		uint32 id;
+		SVGADCCmdPreempt body;
+	} __packed cmd;
+
+	cmd.id = SVGA_DC_CMD_PREEMPT;
+	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
+	cmd.body.ignoreIDZero = 0;
+
+	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+
 /**
  * vmw_cmdbuf_startstop - Send a start / stop command through the device
  * context.
@@ -1082,7 +1188,7 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
 *
 * Synchronously sends a device start / stop context command.
 */
-static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
 				bool enable)
 {
 	struct {
@@ -1092,7 +1198,7 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
 
 	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
 	cmd.body.enable = (enable) ? 1 : 0;
-	cmd.body.context = SVGA_CB_CONTEXT_0;
+	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
 
 	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
 }
@@ -1191,7 +1297,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 {
 	struct vmw_cmdbuf_man *man;
 	struct vmw_cmdbuf_context *ctx;
-	int i;
+	unsigned int i;
 	int ret;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
@@ -1226,8 +1332,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 	spin_lock_init(&man->lock);
 	mutex_init(&man->cur_mutex);
 	mutex_init(&man->space_mutex);
-	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
-		     (unsigned long) man);
+	mutex_init(&man->error_mutex);
 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
 	init_waitqueue_head(&man->alloc_queue);
 	init_waitqueue_head(&man->idle_queue);
@@ -1236,11 +1341,14 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
 	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
 			       &dev_priv->error_waiters);
-	ret = vmw_cmdbuf_startstop(man, true);
-	if (ret) {
-		DRM_ERROR("Failed starting command buffer context 0.\n");
-		vmw_cmdbuf_man_destroy(man);
-		return ERR_PTR(ret);
+	for_each_cmdbuf_ctx(man, i, ctx) {
+		ret = vmw_cmdbuf_startstop(man, i, true);
+		if (ret) {
+			DRM_ERROR("Failed starting command buffer "
+				  "context %u.\n", i);
+			vmw_cmdbuf_man_destroy(man);
+			return ERR_PTR(ret);
+		}
 	}
 
 	return man;
@@ -1290,18 +1398,24 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
 */
 void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
 {
+	struct vmw_cmdbuf_context *ctx;
+	unsigned int i;
+
 	WARN_ON_ONCE(man->has_pool);
 	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
-	if (vmw_cmdbuf_startstop(man, false))
-		DRM_ERROR("Failed stopping command buffer context 0.\n");
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		if (vmw_cmdbuf_startstop(man, i, false))
+			DRM_ERROR("Failed stopping command buffer "
+				  "context %u.\n", i);
 
 	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
 				  &man->dev_priv->error_waiters);
-	tasklet_kill(&man->tasklet);
 	(void) cancel_work_sync(&man->work);
 	dma_pool_destroy(man->dheaders);
 	dma_pool_destroy(man->headers);
 	mutex_destroy(&man->cur_mutex);
 	mutex_destroy(&man->space_mutex);
+	mutex_destroy(&man->error_mutex);
 	kfree(man);
 }

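The error-handling rework above restarts a command buffer just past the failing command: vmw_cmdbuf_work_func() asks vmw_cmd_describe() for the size of the command at errorOffset, then rebases the buffer before resubmitting. A standalone sketch of that arithmetic (illustration only; the struct below is a simplified stand-in, not the driver's SVGACBHeader):

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-in for the device command buffer header. */
    struct model_hdr {
    	uint32_t error_offset;	/* device-reported offset of the failing command */
    	uint32_t length;	/* bytes remaining in the buffer */
    	uint64_t pa;		/* buffer start address handed to the device */
    };

    /* Returns false when the failing command was the last one (drop buffer). */
    static bool model_restart(struct model_hdr *hdr, uint32_t error_cmd_size)
    {
    	uint32_t new_start = hdr->error_offset + error_cmd_size;

    	if (new_start >= hdr->length)
    		return false;

    	/* Rebase just past the failing command and resubmit the rest. */
    	hdr->pa += new_start;
    	hdr->length -= new_start;
    	hdr->error_offset = 0;
    	return true;
    }

The real work function additionally raises block_submission, preempts the context, and splices the preempted buffers back in behind the restarted one so submission order is preserved.
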
+ 3 - 8
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -36,7 +36,6 @@
 #include <drm/ttm/ttm_module.h>
 #include <linux/dma_remapping.h>
 
-#define VMWGFX_DRIVER_NAME "vmwgfx"
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
 #define VMWGFX_CHIP_SVGAII 0
 #define VMW_FB_RESERVATION 0
@@ -825,7 +824,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	}
 
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-		ret = drm_irq_install(dev, dev->pdev->irq);
+		ret = vmw_irq_install(dev, dev->pdev->irq);
 		if (ret != 0) {
 			DRM_ERROR("Failed installing irq: %d\n", ret);
 			goto out_no_irq;
@@ -937,7 +936,7 @@ out_no_bdev:
 	vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-		drm_irq_uninstall(dev_priv->dev);
+		vmw_irq_uninstall(dev_priv->dev);
 out_no_irq:
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
@@ -990,7 +989,7 @@ static void vmw_driver_unload(struct drm_device *dev)
 	vmw_release_device_late(dev_priv);
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-		drm_irq_uninstall(dev_priv->dev);
+		vmw_irq_uninstall(dev_priv->dev);
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
 	else
@@ -1516,10 +1515,6 @@ static struct drm_driver driver = {
 	.load = vmw_driver_load,
 	.unload = vmw_driver_unload,
 	.lastclose = vmw_lastclose,
-	.irq_preinstall = vmw_irq_preinstall,
-	.irq_postinstall = vmw_irq_postinstall,
-	.irq_uninstall = vmw_irq_uninstall,
-	.irq_handler = vmw_irq_handler,
 	.get_vblank_counter = vmw_get_vblank_counter,
 	.enable_vblank = vmw_enable_vblank,
 	.disable_vblank = vmw_disable_vblank,

+ 23 - 16
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

@@ -40,10 +40,12 @@
 #include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
+#include <linux/sync_file.h>
 
-#define VMWGFX_DRIVER_DATE "20170607"
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DATE "20170612"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 13
+#define VMWGFX_DRIVER_MINOR 14
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -351,6 +353,12 @@ struct vmw_otable_batch {
 	struct ttm_buffer_object *otable_bo;
 };
 
+enum {
+	VMW_IRQTHREAD_FENCE,
+	VMW_IRQTHREAD_CMDBUF,
+	VMW_IRQTHREAD_MAX
+};
+
 struct vmw_private {
 	struct ttm_bo_device bdev;
 	struct ttm_bo_global_ref bo_global_ref;
@@ -529,6 +537,7 @@ struct vmw_private {
 	struct vmw_otable_batch otable_batch;
 
 	struct vmw_cmdbuf_man *cman;
+	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -561,24 +570,21 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
 static inline void vmw_write(struct vmw_private *dev_priv,
 			     unsigned int offset, uint32_t value)
 {
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+	spin_lock(&dev_priv->hw_lock);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
-	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+	spin_unlock(&dev_priv->hw_lock);
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 				unsigned int offset)
 {
-	unsigned long irq_flags;
 	u32 val;
 
-	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+	spin_lock(&dev_priv->hw_lock);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
-	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+	spin_unlock(&dev_priv->hw_lock);
 
 	return val;
 }
@@ -821,7 +827,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       uint32_t dx_context_handle,
 			       struct drm_vmw_fence_rep __user
 			       *user_fence_rep,
-			       struct vmw_fence_obj **out_fence);
+			       struct vmw_fence_obj **out_fence,
+			       uint32_t flags);
 extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 					    struct vmw_fence_obj *fence);
 extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
@@ -836,23 +843,23 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 					struct drm_vmw_fence_rep __user
 					*user_fence_rep,
 					struct vmw_fence_obj *fence,
-					uint32_t fence_handle);
+					uint32_t fence_handle,
+					int32_t out_fence_fd,
+					struct sync_file *sync_file);
 extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 				      struct ttm_buffer_object *bo,
 				      bool interruptible,
 				      bool validate_as_mob);
-
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
 
 /**
  * IRQs and waiting - vmwgfx_irq.c
  */
 
-extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
 			  uint32_t seqno, bool interruptible,
 			  unsigned long timeout);
-extern void vmw_irq_preinstall(struct drm_device *dev);
-extern int vmw_irq_postinstall(struct drm_device *dev);
+extern int vmw_irq_install(struct drm_device *dev, int irq);
 extern void vmw_irq_uninstall(struct drm_device *dev);
 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
 				uint32_t seqno);
@@ -1150,13 +1157,13 @@ extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
 extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
 			      struct vmw_cmdbuf_header *header,
 			      bool flush);
-extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
 extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
 			      size_t size, bool interruptible,
 			      struct vmw_cmdbuf_header **p_header);
 extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
 extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
 				bool interruptible);
+extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
 
 
 /**

+ 133 - 15
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

@@ -24,6 +24,7 @@
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  **************************************************************************/
+#include <linux/sync_file.h>
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_reg.h"
@@ -112,11 +113,12 @@ struct vmw_cmd_entry {
 	bool user_allow;
 	bool gb_disable;
 	bool gb_enable;
+	const char *cmd_name;
 };
 
 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
-				       (_gb_disable), (_gb_enable)}
+				       (_gb_disable), (_gb_enable), #_cmd}
 
 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 					struct vmw_sw_context *sw_context,
@@ -3302,6 +3304,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
 		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
+		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
@@ -3469,6 +3473,51 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    true, false, true),
 };
 
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
+{
+	u32 cmd_id = ((u32 *) buf)[0];
+
+	if (cmd_id >= SVGA_CMD_MAX) {
+		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+		const struct vmw_cmd_entry *entry;
+
+		*size = header->size + sizeof(SVGA3dCmdHeader);
+		cmd_id = header->id;
+		if (cmd_id >= SVGA_3D_CMD_MAX)
+			return false;
+
+		cmd_id -= SVGA_3D_CMD_BASE;
+		entry = &vmw_cmd_entries[cmd_id];
+		*cmd = entry->cmd_name;
+		return true;
+	}
+
+	switch (cmd_id) {
+	case SVGA_CMD_UPDATE:
+		*cmd = "SVGA_CMD_UPDATE";
+		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
+		break;
+	case SVGA_CMD_DEFINE_GMRFB:
+		*cmd = "SVGA_CMD_DEFINE_GMRFB";
+		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
+		break;
+	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
+		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+		break;
+	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
+		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+		break;
+	default:
+		*cmd = "UNKNOWN";
+		*size = 0;
+		return false;
+	}
+
+	return true;
+}
+
 static int vmw_cmd_check(struct vmw_private *dev_priv,
 			 struct vmw_sw_context *sw_context,
 			 void *buf, uint32_t *size)
@@ -3781,6 +3830,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
  * which the information should be copied.
  * @fence: Pointer to the fence object.
  * @fence_handle: User-space fence handle.
+ * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
+ * @sync_file:  Only used to clean up in case of an error in this function.
  *
  * This function copies fence information to user-space. If copying fails,
  * the user-space struct drm_vmw_fence_rep::error member is hopefully
@@ -3796,7 +3847,9 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 			    int ret,
 			    struct drm_vmw_fence_rep __user *user_fence_rep,
 			    struct vmw_fence_obj *fence,
-			    uint32_t fence_handle)
+			    uint32_t fence_handle,
+			    int32_t out_fence_fd,
+			    struct sync_file *sync_file)
 {
 	struct drm_vmw_fence_rep fence_rep;
 
@@ -3806,6 +3859,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 	memset(&fence_rep, 0, sizeof(fence_rep));
 
 	fence_rep.error = ret;
+	fence_rep.fd = out_fence_fd;
 	if (ret == 0) {
 		BUG_ON(fence == NULL);
 
@@ -3828,6 +3882,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 	 * and unreference the handle.
 	 */
 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+		if (sync_file)
+			fput(sync_file->file);
+
+		if (fence_rep.fd != -1) {
+			put_unused_fd(fence_rep.fd);
+			fence_rep.fd = -1;
+		}
+
 		ttm_ref_object_base_unref(vmw_fp->tfile,
 					  fence_handle, TTM_REF_USAGE);
 		DRM_ERROR("Fence copy error. Syncing.\n");
@@ -4003,7 +4065,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			uint64_t throttle_us,
 			uint32_t dx_context_handle,
 			struct drm_vmw_fence_rep __user *user_fence_rep,
-			struct vmw_fence_obj **out_fence)
+			struct vmw_fence_obj **out_fence,
+			uint32_t flags)
 {
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
 	struct vmw_fence_obj *fence = NULL;
@@ -4013,20 +4076,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	struct ww_acquire_ctx ticket;
 	uint32_t handle;
 	int ret;
+	int32_t out_fence_fd = -1;
+	struct sync_file *sync_file = NULL;
+
+
+	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+		if (out_fence_fd < 0) {
+			DRM_ERROR("Failed to get a fence file descriptor.\n");
+			return out_fence_fd;
+		}
+	}
 
 	if (throttle_us) {
 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
 				   throttle_us);
 
 		if (ret)
-			return ret;
+			goto out_free_fence_fd;
 	}
 
 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 					     kernel_commands, command_size,
 					     &header);
-	if (IS_ERR(kernel_commands))
-		return PTR_ERR(kernel_commands);
+	if (IS_ERR(kernel_commands)) {
+		ret = PTR_ERR(kernel_commands);
+		goto out_free_fence_fd;
+	}
 
 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
 	if (ret) {
@@ -4162,8 +4238,32 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
 
 	vmw_clear_validations(sw_context);
+
+	/*
+	 * If anything fails here, give up trying to export the fence
+	 * and do a sync since the user mode will not be able to sync
+	 * the fence itself.  This ensures we are still functionally
+	 * correct.
+	 */
+	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+
+		sync_file = sync_file_create(&fence->base);
+		if (!sync_file) {
+			DRM_ERROR("Unable to create sync file for fence\n");
+			put_unused_fd(out_fence_fd);
+			out_fence_fd = -1;
+
+			(void) vmw_fence_obj_wait(fence, false, false,
+						  VMW_FENCE_WAIT_TIMEOUT);
+		} else {
+			/* Link the fence with the FD created earlier */
+			fd_install(out_fence_fd, sync_file->file);
+		}
+	}
+
 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
-				    user_fence_rep, fence, handle);
+				    user_fence_rep, fence, handle,
+				    out_fence_fd, sync_file);
 
 	/* Don't unreference when handing fence out */
 	if (unlikely(out_fence != NULL)) {
@@ -4214,6 +4314,9 @@ out_unlock:
 out_free_header:
 	if (header)
 		vmw_cmdbuf_header_free(header);
+out_free_fence_fd:
+	if (out_fence_fd >= 0)
+		put_unused_fd(out_fence_fd);
 
 	return ret;
 }
@@ -4366,6 +4469,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
 	static const size_t copy_offset[] = {
 		offsetof(struct drm_vmw_execbuf_arg, context_handle),
 		sizeof(struct drm_vmw_execbuf_arg)};
+	struct dma_fence *in_fence = NULL;
 
 	if (unlikely(size < copy_offset[0])) {
 		DRM_ERROR("Invalid command size, ioctl %d\n",
@@ -4401,15 +4505,25 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
 		arg.context_handle = (uint32_t) -1;
 		break;
 	case 2:
-		if (arg.pad64 != 0) {
-			DRM_ERROR("Unused IOCTL data not set to zero.\n");
-			return -EINVAL;
-		}
-		break;
 	default:
 		break;
 	}
 
+
+	/* If imported a fence FD from elsewhere, then wait on it */
+	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
+		in_fence = sync_file_get_fence(arg.imported_fence_fd);
+
+		if (!in_fence) {
+			DRM_ERROR("Cannot get imported fence\n");
+			return -EINVAL;
+		}
+
+		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
+		if (ret)
+			goto out;
+	}
+
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
@@ -4419,12 +4533,16 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
 				  NULL, arg.command_size, arg.throttle_us,
 				  arg.context_handle,
 				  (void __user *)(unsigned long)arg.fence_rep,
-				  NULL);
+				  NULL,
+				  arg.flags);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0))
-		return ret;
+		goto out;
 
 	vmw_kms_cursor_post_execbuf(dev_priv);
 
-	return 0;
+out:
+	if (in_fence)
+		dma_fence_put(in_fence);
+	return ret;
 }

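The export path above follows the usual three-step kernel sync_file pattern: reserve an fd slot before doing anything irreversible, create the sync_file only once the fence exists, and bind the two with fd_install() as the very last step, because an installed fd can no longer be recycled with put_unused_fd(). A condensed sketch of that ordering (distilled from vmw_execbuf_process() above; needs linux/file.h and linux/sync_file.h; illustration, not driver code):

    /* Condensed from vmw_execbuf_process() above; error paths trimmed. */
    static int vmw_export_fence_fd_sketch(struct vmw_fence_obj *fence)
    {
    	struct sync_file *sync_file;
    	int fd = get_unused_fd_flags(O_CLOEXEC);	/* reserve the slot only */

    	if (fd < 0)
    		return fd;

    	sync_file = sync_file_create(&fence->base);
    	if (!sync_file) {
    		put_unused_fd(fd);	/* fd was never installed: recyclable */
    		return -1;		/* caller reports "no fd" to userspace */
    	}

    	fd_install(fd, sync_file->file);	/* publish: point of no return */
    	return fd;
    }

This is why the error path in vmw_execbuf_copy_fence_user() only calls put_unused_fd() on fds that were never installed, and falls back to a synchronous wait when sync_file_create() fails.
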
+ 70 - 34
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c

@@ -114,12 +114,11 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
 		container_of(f, struct vmw_fence_obj, base);
 
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(&fman->lock, irq_flags);
+	spin_lock(&fman->lock);
 	list_del_init(&fence->head);
 	--fman->num_fence_objects;
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
+	spin_unlock(&fman->lock);
 	fence->destroy(fence);
 }
 
@@ -252,10 +251,10 @@ static void vmw_fence_work_func(struct work_struct *work)
 		INIT_LIST_HEAD(&list);
 		mutex_lock(&fman->goal_irq_mutex);
 
-		spin_lock_irq(&fman->lock);
+		spin_lock(&fman->lock);
 		list_splice_init(&fman->cleanup_list, &list);
 		seqno_valid = fman->seqno_valid;
-		spin_unlock_irq(&fman->lock);
+		spin_unlock(&fman->lock);
 
 		if (!seqno_valid && fman->goal_irq_on) {
 			fman->goal_irq_on = false;
@@ -305,15 +304,14 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 
 void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
 {
-	unsigned long irq_flags;
 	bool lists_empty;
 
 	(void) cancel_work_sync(&fman->work);
 
-	spin_lock_irqsave(&fman->lock, irq_flags);
+	spin_lock(&fman->lock);
 	lists_empty = list_empty(&fman->fence_list) &&
 		list_empty(&fman->cleanup_list);
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
+	spin_unlock(&fman->lock);
 
 	BUG_ON(!lists_empty);
 	kfree(fman);
@@ -323,7 +321,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
 			      struct vmw_fence_obj *fence, u32 seqno,
 			      void (*destroy) (struct vmw_fence_obj *fence))
 {
-	unsigned long irq_flags;
 	int ret = 0;
 
 	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
@@ -331,7 +328,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
 	INIT_LIST_HEAD(&fence->seq_passed_actions);
 	fence->destroy = destroy;
 
-	spin_lock_irqsave(&fman->lock, irq_flags);
+	spin_lock(&fman->lock);
 	if (unlikely(fman->fifo_down)) {
 		ret = -EBUSY;
 		goto out_unlock;
@@ -340,7 +337,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
 	++fman->num_fence_objects;
 
 out_unlock:
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
+	spin_unlock(&fman->lock);
 	return ret;
 
 }
@@ -489,11 +486,9 @@ rerun:
 
 void vmw_fences_update(struct vmw_fence_manager *fman)
 {
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&fman->lock, irq_flags);
+	spin_lock(&fman->lock);
 	__vmw_fences_update(fman);
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
+	spin_unlock(&fman->lock);
 }
 
 bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -649,6 +644,51 @@ out_no_object:
 }
 
 
+/**
+ * vmw_wait_dma_fence - Wait for a dma fence
+ *
+ * @fman: pointer to a fence manager
+ * @fence: DMA fence to wait on
+ *
+ * This function handles the case when the fence is actually a fence
+ * array.  If that's the case, it'll wait on each of the child fences.
+ */
+int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+		       struct dma_fence *fence)
+{
+	struct dma_fence_array *fence_array;
+	int ret = 0;
+	int i;
+
+
+	if (dma_fence_is_signaled(fence))
+		return 0;
+
+	if (!dma_fence_is_array(fence))
+		return dma_fence_wait(fence, true);
+
+	/* From i915: Note that if the fence-array was created in
+	 * signal-on-any mode, we should *not* decompose it into its individual
+	 * fences. However, we don't currently store which mode the fence-array
+	 * is operating in. Fortunately, the only user of signal-on-any is
+	 * private to amdgpu and we should not see any incoming fence-array
+	 * from sync-file being in signal-on-any mode.
+	 */
+
+	fence_array = to_dma_fence_array(fence);
+	for (i = 0; i < fence_array->num_fences; i++) {
+		struct dma_fence *child = fence_array->fences[i];
+
+		ret = dma_fence_wait(child, true);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+
 /**
  * vmw_fence_fifo_down - signal all unsignaled fence objects.
  */
@@ -663,14 +703,14 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 	 * restart when we've released the fman->lock.
 	 */
 
-	spin_lock_irq(&fman->lock);
+	spin_lock(&fman->lock);
 	fman->fifo_down = true;
 	while (!list_empty(&fman->fence_list)) {
 		struct vmw_fence_obj *fence =
 			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
 				   head);
 		dma_fence_get(&fence->base);
-		spin_unlock_irq(&fman->lock);
+		spin_unlock(&fman->lock);
 
 		ret = vmw_fence_obj_wait(fence, false, false,
 					 VMW_FENCE_WAIT_TIMEOUT);
@@ -686,18 +726,16 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 
 		BUG_ON(!list_empty(&fence->head));
 		dma_fence_put(&fence->base);
-		spin_lock_irq(&fman->lock);
+		spin_lock(&fman->lock);
 	}
-	spin_unlock_irq(&fman->lock);
+	spin_unlock(&fman->lock);
 }
 
 void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
 {
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&fman->lock, irq_flags);
+	spin_lock(&fman->lock);
 	fman->fifo_down = false;
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
+	spin_unlock(&fman->lock);
 }
 
 
@@ -812,9 +850,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
 	arg->signaled = vmw_fence_obj_signaled(fence);
 
 	arg->signaled_flags = arg->flags;
-	spin_lock_irq(&fman->lock);
+	spin_lock(&fman->lock);
 	arg->passed_seqno = dev_priv->last_read_seqno;
-	spin_unlock_irq(&fman->lock);
+	spin_unlock(&fman->lock);
 
 	ttm_base_object_unref(&base);
 
@@ -841,8 +879,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
- * This function is always called from atomic context, and may be called
- * from irq context.
+ * This function is always called from atomic context.
 */
 static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
 {
@@ -851,13 +888,13 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
 	struct drm_device *dev = eaction->dev;
 	struct drm_pending_event *event = eaction->event;
 	struct drm_file *file_priv;
-	unsigned long irq_flags;
+
 
 	if (unlikely(event == NULL))
 		return;
 
 	file_priv = event->file_priv;
-	spin_lock_irqsave(&dev->event_lock, irq_flags);
+	spin_lock_irq(&dev->event_lock);
 
 	if (likely(eaction->tv_sec != NULL)) {
 		struct timeval tv;
@@ -869,7 +906,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
 
 	drm_send_event_locked(dev, eaction->event);
 	eaction->event = NULL;
-	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+	spin_unlock_irq(&dev->event_lock);
 }
 
 /**
@@ -904,11 +941,10 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
 			      struct vmw_fence_action *action)
 {
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
-	unsigned long irq_flags;
 	bool run_update = false;
 
 	mutex_lock(&fman->goal_irq_mutex);
-	spin_lock_irqsave(&fman->lock, irq_flags);
+	spin_lock(&fman->lock);
 
 	fman->pending_actions[action->type]++;
 	if (dma_fence_is_signaled_locked(&fence->base)) {
@@ -927,7 +963,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
 		run_update = vmw_fence_goal_check_locked(fence);
 	}
 
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
+	spin_unlock(&fman->lock);
 
 	if (run_update) {
 		if (!fman->goal_irq_on) {
@@ -1114,7 +1150,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	}
 
 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
-				    handle);
+				    handle, -1, NULL);
 	vmw_fence_obj_unreference(&fence);
 	return 0;
 out_no_create:

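On the import side, vmw_execbuf_ioctl() resolves the incoming fd with sync_file_get_fence() and hands the result to the vmw_wait_dma_fence() helper added above, which decomposes dma_fence_array containers. A condensed caller sketch (names taken from the diffs above; error handling and the exact put ordering simplified; illustration, not driver code):

    /* Condensed from vmw_execbuf_ioctl() above. */
    static int vmw_import_fence_fd_sketch(struct vmw_private *dev_priv, int fd)
    {
    	struct dma_fence *in_fence = sync_file_get_fence(fd);
    	int ret;

    	if (!in_fence)
    		return -EINVAL;	/* not a sync_file fd */

    	/* Waits on each child fence if in_fence is a dma_fence_array. */
    	ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);

    	dma_fence_put(in_fence);	/* drop sync_file_get_fence()'s reference */
    	return ret;
    }
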
+ 4 - 0
drivers/gpu/drm/vmwgfx/vmwgfx_fence.h

@@ -28,6 +28,7 @@
 #ifndef _VMWGFX_FENCE_H_
 
 #include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
 
 #define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
 
@@ -102,6 +103,9 @@ extern int vmw_user_fence_create(struct drm_file *file_priv,
 				 struct vmw_fence_obj **p_fence,
 				 uint32_t *p_handle);
 
+extern int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+			      struct dma_fence *fence);
+
 extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
 
 extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);

+ 91 - 20
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c

@@ -30,11 +30,56 @@
 
 #define VMW_FENCE_WRAP (1 << 24)
 
-irqreturn_t vmw_irq_handler(int irq, void *arg)
+/**
+ * vmw_thread_fn - Deferred (process context) irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the deferred part of irq processing.
+ * The function is guaranteed to run at least once after the
+ * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
+ *
+ */
+static irqreturn_t vmw_thread_fn(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	irqreturn_t ret = IRQ_NONE;
+
+	if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
+			       dev_priv->irqthread_pending)) {
+		vmw_fences_update(dev_priv->fman);
+		wake_up_all(&dev_priv->fence_queue);
+		ret = IRQ_HANDLED;
+	}
+
+	if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
+			       dev_priv->irqthread_pending)) {
+		vmw_cmdbuf_irqthread(dev_priv->cman);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+/**
+ * vmw_irq_handler - irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the quick part of irq processing.
+ * The function performs fast actions like clearing the device interrupt
+ * flags and also reasonably quick actions like waking processes waiting for
+ * FIFO space. Other IRQ actions are deferred to the IRQ thread.
+ */
+static irqreturn_t vmw_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	uint32_t status, masked_status;
+	irqreturn_t ret = IRQ_HANDLED;
 
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	masked_status = status & READ_ONCE(dev_priv->irq_mask);
@@ -45,20 +90,21 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
 	if (!status)
 		return IRQ_NONE;
 
-	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
-			     SVGA_IRQFLAG_FENCE_GOAL)) {
-		vmw_fences_update(dev_priv->fman);
-		wake_up_all(&dev_priv->fence_queue);
-	}
-
 	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
 		wake_up_all(&dev_priv->fifo_queue);
 
-	if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
-			     SVGA_IRQFLAG_ERROR))
-		vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
+	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+			      SVGA_IRQFLAG_FENCE_GOAL)) &&
+	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
+		ret = IRQ_WAKE_THREAD;
 
-	return IRQ_HANDLED;
+	if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+			      SVGA_IRQFLAG_ERROR)) &&
+	    !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
+			      dev_priv->irqthread_pending))
+		ret = IRQ_WAKE_THREAD;
+
+	return ret;
 }
 
 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
@@ -281,23 +327,15 @@ int vmw_wait_seqno(struct vmw_private *dev_priv,
 	return ret;
 }
 
-void vmw_irq_preinstall(struct drm_device *dev)
+static void vmw_irq_preinstall(struct drm_device *dev)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	uint32_t status;
 
-	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
-		return;
-
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
 
-int vmw_irq_postinstall(struct drm_device *dev)
-{
-	return 0;
-}
-
 void vmw_irq_uninstall(struct drm_device *dev)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
@@ -306,8 +344,41 @@ void vmw_irq_uninstall(struct drm_device *dev)
 	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
 		return;
 
+	if (!dev->irq_enabled)
+		return;
+
 	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
 
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+	dev->irq_enabled = false;
+	free_irq(dev->irq, dev);
+}
+
+/**
+ * vmw_irq_install - Install the irq handlers
+ *
+ * @dev:  Pointer to the drm device.
+ * @irq:  The irq number.
+ * Return:  Zero if successful. Negative number otherwise.
+ */
+int vmw_irq_install(struct drm_device *dev, int irq)
+{
+	int ret;
+
+	if (dev->irq_enabled)
+		return -EBUSY;
+
+	vmw_irq_preinstall(dev);
+
+	ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn,
+				   IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
+	if (ret < 0)
+		return ret;
+
+	dev->irq_enabled = true;
+	dev->irq = irq;
+
+	return ret;
 }

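The interrupt rework above replaces the tasklet with request_threaded_irq(): the hard handler only acks the device and marks work pending, returning IRQ_WAKE_THREAD at most once per batch, while the thread function clears the pending bit and does the sleepable work in process context. A generic sketch of that shape (all names below are placeholders, not driver symbols):

    #include <linux/bitops.h>
    #include <linux/interrupt.h>

    enum { DEMO_PENDING, DEMO_BITS };

    struct demo_dev {
    	DECLARE_BITMAP(pending, DEMO_BITS);
    };

    static irqreturn_t demo_hard_handler(int irq, void *arg)
    {
    	struct demo_dev *dd = arg;

    	/* Placeholder: read and ack the device status registers here. */

    	/* Wake the thread only if it is not already pending. */
    	if (!test_and_set_bit(DEMO_PENDING, dd->pending))
    		return IRQ_WAKE_THREAD;

    	return IRQ_HANDLED;
    }

    static irqreturn_t demo_thread_fn(int irq, void *arg)
    {
    	struct demo_dev *dd = arg;

    	if (!test_and_clear_bit(DEMO_PENDING, dd->pending))
    		return IRQ_NONE;

    	/* Deferred work goes here; this context may sleep. */
    	return IRQ_HANDLED;
    }

    /* Registration mirrors vmw_irq_install() above:
     * request_threaded_irq(irq, demo_hard_handler, demo_thread_fn,
     *			    IRQF_SHARED, "demo", dd);
     */

The test_and_set_bit/test_and_clear_bit pair is what lets the hard handler coalesce interrupts: repeated IRQs while the thread is pending return IRQ_HANDLED without re-waking it.
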
+ 1 - 1
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c

@@ -2494,7 +2494,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
 	if (file_priv)
 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
 					    ret, user_fence_rep, fence,
-					    handle);
+					    handle, -1, NULL);
 	if (out_fence)
 		*out_fence = fence;
 	else

+ 8 - 3
include/uapi/drm/vmwgfx_drm.h

@@ -297,13 +297,17 @@ union drm_vmw_surface_reference_arg {
  * @version: Allows expanding the execbuf ioctl parameters without breaking
  * backwards compatibility, since user-space will always tell the kernel
  * which version it uses.
- * @flags: Execbuf flags. None currently.
+ * @flags: Execbuf flags.
+ * @imported_fence_fd:  FD for a fence imported from another device
  *
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
 #define DRM_VMW_EXECBUF_VERSION 2
 
+#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
+#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)
+
 struct drm_vmw_execbuf_arg {
 	__u64 commands;
 	__u32 command_size;
@@ -312,7 +316,7 @@ struct drm_vmw_execbuf_arg {
 	__u32 version;
 	__u32 flags;
 	__u32 context_handle;
-	__u32 pad64;
+	__s32 imported_fence_fd;
 };
 
 /**
@@ -328,6 +332,7 @@ struct drm_vmw_execbuf_arg {
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
+ * @fd: FD associated with the fence, -1 if not exported
 * @error: This member should've been set to -EFAULT on submission.
  * The following actions should be taken on completion:
  * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -345,7 +350,7 @@ struct drm_vmw_fence_rep {
 	__u32 mask;
 	__u32 seqno;
 	__u32 passed_seqno;
-	__u32 pad64;
+	__s32 fd;
 	__s32 error;
 };
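
A hypothetical userspace sketch of the new uapi (not part of this series): exporting hands back a sync_file fd in drm_vmw_fence_rep::fd, which can be polled like any sync_file; importing passes an existing fd in imported_fence_fd. The drmCommandWrite() wrapper from libdrm is assumed, and command-buffer setup is elided; untested illustration only:

    #include <poll.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <drm/vmwgfx_drm.h>

    /* Submits cmds; returns an exported fence fd, or -1 on failure/no fd. */
    static int submit_with_fence_fd(int drm_fd, void *cmds, __u32 cmd_size,
    				int in_fence_fd)
    {
    	struct drm_vmw_fence_rep rep;
    	struct drm_vmw_execbuf_arg arg;

    	memset(&arg, 0, sizeof(arg));
    	memset(&rep, 0, sizeof(rep));
    	arg.commands = (unsigned long) cmds;
    	arg.command_size = cmd_size;
    	arg.version = DRM_VMW_EXECBUF_VERSION;
    	arg.fence_rep = (unsigned long) &rep;
    	arg.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;

    	if (in_fence_fd >= 0) {		/* wait for a foreign fence first */
    		arg.flags |= DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
    		arg.imported_fence_fd = in_fence_fd;
    	}

    	if (drmCommandWrite(drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) != 0)
    		return -1;

    	if (rep.fd != -1) {		/* sync_file fd: poll until signaled */
    		struct pollfd pfd = { .fd = rep.fd, .events = POLLIN };
    		poll(&pfd, 1, -1);
    	}
    	return rep.fd;
    }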