@@ -415,16 +415,16 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
  *
  * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
  * command buffers left that are not submitted to hardware, Make sure
- * IRQ handling is turned on. Otherwise, make sure it's turned off. This
- * function may return -EAGAIN to indicate it should be rerun due to
- * possibly missed IRQs if IRQs has just been turned on.
+ * IRQ handling is turned on. Otherwise, make sure it's turned off.
  */
-static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 {
-	int notempty = 0;
+	int notempty;
 	struct vmw_cmdbuf_context *ctx;
 	int i;
 
+retry:
+	notempty = 0;
 	for_each_cmdbuf_ctx(man, i, ctx)
 		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 
@@ -440,10 +440,8 @@ static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 		man->irq_on = true;
 
 		/* Rerun in case we just missed an irq. */
-		return -EAGAIN;
+		goto retry;
 	}
-
-	return 0;
 }
 
 /**
@@ -468,8 +466,7 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 	header->cb_context = cb_context;
 	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 
-	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
-		vmw_cmdbuf_man_process(man);
+	vmw_cmdbuf_man_process(man);
 }
 
 /**
@@ -488,8 +485,7 @@ static void vmw_cmdbuf_man_tasklet(unsigned long data)
 	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
 
 	spin_lock(&man->lock);
-	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
-		(void) vmw_cmdbuf_man_process(man);
+	vmw_cmdbuf_man_process(man);
 	spin_unlock(&man->lock);
 }
 
@@ -507,6 +503,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 	struct vmw_cmdbuf_man *man =
 		container_of(work, struct vmw_cmdbuf_man, work);
 	struct vmw_cmdbuf_header *entry, *next;
+	uint32_t dummy;
 	bool restart = false;
 
 	spin_lock_bh(&man->lock);
@@ -523,6 +520,8 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 	if (restart && vmw_cmdbuf_startstop(man, true))
 		DRM_ERROR("Failed restarting command buffer context 0.\n");
 
+	/* Send a new fence in case one was removed */
+	vmw_fifo_send_fence(man->dev_priv, &dummy);
 }
 
 /**
@@ -682,7 +681,7 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 					 DRM_MM_SEARCH_DEFAULT,
 					 DRM_MM_CREATE_DEFAULT);
 	if (ret) {
-		(void) vmw_cmdbuf_man_process(man);
+		vmw_cmdbuf_man_process(man);
 		ret = drm_mm_insert_node_generic(&man->mm, info->node,
 						 info->page_size, 0, 0,
 						 DRM_MM_SEARCH_DEFAULT,
@@ -1168,7 +1167,14 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
 	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
 
 	man->has_pool = true;
-	man->default_size = default_size;
+
+	/*
+	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
+	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
+	 * needs to wait for space and we block on further command
+	 * submissions to be able to free up space.
+	 */
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
 	DRM_INFO("Using command buffers with %s pool.\n",
 		 (man->using_mob) ? "MOB" : "DMA");
 
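
For readability, here is a sketch of what the control flow of vmw_cmdbuf_man_process() looks like after this change, reconstructed from the first two hunks above. Only the lines that appear in those hunks are verbatim; the irq on/off bookkeeping between them is not part of this excerpt, so the branch condition below is an assumed summary rather than the driver's exact code.

static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	/*
	 * Not shown in the hunks above: when nothing is pending the
	 * command-buffer irq is turned off; the branch below covers the
	 * opposite case (assumed condition, see lead-in).
	 */
	if (notempty && !man->irq_on) {
		/* Elided in this excerpt: enable the command-buffer irq. */
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

Because the retry now happens inside the function itself, the callers in vmw_cmdbuf_ctx_add(), the tasklet, and vmw_cmdbuf_try_alloc() no longer need to test for -EAGAIN and call it a second time, which is what the remaining hunks simplify.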