@@ -1996,14 +1996,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
 	drm_gem_object_unreference(&ringbuf->obj->base);
 	ringbuf->obj = NULL;
 }
 
-int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-			       struct intel_ringbuffer *ringbuf)
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+				      struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -2023,6 +2023,48 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+{
+	struct intel_ringbuffer *ring;
+	int ret;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ring->ring = engine;
+
+	ring->size = size;
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = size;
+	if (IS_I830(engine->dev) || IS_845G(engine->dev))
+		ring->effective_size -= 2 * CACHELINE_BYTES;
+
+	ring->last_retired_head = -1;
+	intel_ring_update_space(ring);
+
+	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+	if (ret) {
+		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+			  engine->name, ret);
+		kfree(ring);
+		return ERR_PTR(ret);
+	}
+
+	return ring;
+}
+
+void
+intel_ringbuffer_free(struct intel_ringbuffer *ring)
+{
+	intel_destroy_ringbuffer_obj(ring);
+	kfree(ring);
+}
+
 static int intel_init_ring_buffer(struct drm_device *dev,
 				  struct intel_engine_cs *ring)
 {
@@ -2031,22 +2073,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	WARN_ON(ring->buffer);
 
-	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-	if (!ringbuf)
-		return -ENOMEM;
-	ring->buffer = ringbuf;
-
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->execlist_queue);
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
-	ringbuf->size = 32 * PAGE_SIZE;
-	ringbuf->ring = ring;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
 
+	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+	if (IS_ERR(ringbuf))
+		return PTR_ERR(ringbuf);
+	ring->buffer = ringbuf;
+
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
@@ -2058,15 +2098,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto error;
 	}
 
-	WARN_ON(ringbuf->obj);
-
-	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
-				ring->name, ret);
-		goto error;
-	}
-
 	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
@@ -2075,14 +2106,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto error;
 	}
 
-	/* Workaround an erratum on the i830 which causes a hang if
-	 * the TAIL pointer points to within the last 2 cachelines
-	 * of the buffer.
-	 */
-	ringbuf->effective_size = ringbuf->size;
-	if (IS_I830(dev) || IS_845G(dev))
-		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
 		goto error;
@@ -2090,7 +2113,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	return 0;
 
 error:
-	kfree(ringbuf);
+	intel_ringbuffer_free(ringbuf);
 	ring->buffer = NULL;
 	return ret;
 }
@@ -2098,19 +2121,18 @@ error:
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv;
-	struct intel_ringbuffer *ringbuf;
 
 	if (!intel_ring_initialized(ring))
 		return;
 
 	dev_priv = to_i915(ring->dev);
-	ringbuf = ring->buffer;
 
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	intel_unpin_ringbuffer_obj(ringbuf);
-	intel_destroy_ringbuffer_obj(ringbuf);
+	intel_unpin_ringbuffer_obj(ring->buffer);
+	intel_ringbuffer_free(ring->buffer);
+	ring->buffer = NULL;
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -2119,9 +2141,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
 	i915_cmd_parser_fini_ring(ring);
 	i915_gem_batch_pool_fini(&ring->batch_pool);
-
-	kfree(ringbuf);
-	ring->buffer = NULL;
 }
 
 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)