@@ -50,7 +50,7 @@ static inline int __ring_space(int head, int tail, int size)
 
 static inline int ring_space(struct intel_engine_cs *ring)
 {
-	return __ring_space(ring->head & HEAD_ADDR, ring->tail, ring->size);
+	return __ring_space(ring->buffer->head & HEAD_ADDR, ring->buffer->tail, ring->buffer->size);
 }
 
 static bool intel_ring_stopped(struct intel_engine_cs *ring)
@@ -61,10 +61,10 @@ static bool intel_ring_stopped(struct intel_engine_cs *ring)
 
 void __intel_ring_advance(struct intel_engine_cs *ring)
 {
-	ring->tail &= ring->size - 1;
+	ring->buffer->tail &= ring->buffer->size - 1;
 	if (intel_ring_stopped(ring))
 		return;
-	ring->write_tail(ring, ring->tail);
+	ring->write_tail(ring, ring->buffer->tail);
 }
 
 static int
@@ -481,7 +481,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj = ring->obj;
+	struct drm_i915_gem_object *obj = ring->buffer->obj;
 	int ret = 0;
 
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -520,7 +520,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	 * register values. */
 	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
-			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->buffer->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
@@ -540,10 +540,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring);
-		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring_space(ring);
-		ring->last_retired_head = -1;
+		ring->buffer->head = I915_READ_HEAD(ring);
+		ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+		ring->buffer->space = ring_space(ring);
+		ring->buffer->last_retired_head = -1;
 	}
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -1382,14 +1382,14 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (ring->obj)
+	if (ring->buffer->obj)
 		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ring->size);
+		obj = i915_gem_object_create_stolen(dev, ring->buffer->size);
 	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, ring->size);
+		obj = i915_gem_alloc_object(dev, ring->buffer->size);
 	if (obj == NULL)
 		return -ENOMEM;
 
@@ -1401,15 +1401,15 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	if (ret)
 		goto err_unpin;
 
-	ring->virtual_start =
+	ring->buffer->virtual_start =
 		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   ring->size);
-	if (ring->virtual_start == NULL) {
+			   ring->buffer->size);
+	if (ring->buffer->virtual_start == NULL) {
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->obj = obj;
+	ring->buffer->obj = obj;
 	return 0;
 
 err_unpin:
@@ -1435,7 +1435,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->size = 32 * PAGE_SIZE;
+	ring->buffer->size = 32 * PAGE_SIZE;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1461,9 +1461,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	 * the TAIL pointer points to within the last 2 cachelines
 	 * of the buffer.
 	 */
-	ring->effective_size = ring->size;
+	ring->buffer->effective_size = ring->buffer->size;
 	if (IS_I830(dev) || IS_845G(dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
@@ -1485,17 +1485,17 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-	if (ring->obj == NULL)
+	if (ring->buffer->obj == NULL)
 		return;
 
 	intel_stop_ring_buffer(ring);
 	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	iounmap(ring->virtual_start);
+	iounmap(ring->buffer->virtual_start);
 
-	i915_gem_object_ggtt_unpin(ring->obj);
-	drm_gem_object_unreference(&ring->obj->base);
-	ring->obj = NULL;
+	i915_gem_object_ggtt_unpin(ring->buffer->obj);
+	drm_gem_object_unreference(&ring->buffer->obj->base);
+	ring->buffer->obj = NULL;
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
 
@@ -1516,17 +1516,17 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	u32 seqno = 0;
 	int ret;
 
-	if (ring->last_retired_head != -1) {
-		ring->head = ring->last_retired_head;
-		ring->last_retired_head = -1;
+	if (ring->buffer->last_retired_head != -1) {
+		ring->buffer->head = ring->buffer->last_retired_head;
+		ring->buffer->last_retired_head = -1;
 
-		ring->space = ring_space(ring);
-		if (ring->space >= n)
+		ring->buffer->space = ring_space(ring);
+		if (ring->buffer->space >= n)
 			return 0;
 	}
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (__ring_space(request->tail, ring->tail, ring->size) >= n) {
+		if (__ring_space(request->tail, ring->buffer->tail, ring->buffer->size) >= n) {
 			seqno = request->seqno;
 			break;
 		}
@@ -1540,10 +1540,10 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 		return ret;
 
 	i915_gem_retire_requests_ring(ring);
-	ring->head = ring->last_retired_head;
-	ring->last_retired_head = -1;
+	ring->buffer->head = ring->buffer->last_retired_head;
+	ring->buffer->last_retired_head = -1;
 
-	ring->space = ring_space(ring);
+	ring->buffer->space = ring_space(ring);
 	return 0;
 }
 
@@ -1570,9 +1570,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 
 	trace_i915_ring_wait_begin(ring);
 	do {
-		ring->head = I915_READ_HEAD(ring);
-		ring->space = ring_space(ring);
-		if (ring->space >= n) {
+		ring->buffer->head = I915_READ_HEAD(ring);
+		ring->buffer->space = ring_space(ring);
+		if (ring->buffer->space >= n) {
 			ret = 0;
 			break;
 		}
@@ -1608,21 +1608,21 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 {
 	uint32_t __iomem *virt;
-	int rem = ring->size - ring->tail;
+	int rem = ring->buffer->size - ring->buffer->tail;
 
-	if (ring->space < rem) {
+	if (ring->buffer->space < rem) {
 		int ret = ring_wait_for_space(ring, rem);
 		if (ret)
 			return ret;
 	}
 
-	virt = ring->virtual_start + ring->tail;
+	virt = ring->buffer->virtual_start + ring->buffer->tail;
 	rem /= 4;
 	while (rem--)
 		iowrite32(MI_NOOP, virt++);
 
-	ring->tail = 0;
-	ring->space = ring_space(ring);
+	ring->buffer->tail = 0;
+	ring->buffer->space = ring_space(ring);
 
 	return 0;
 }
@@ -1674,13 +1674,13 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring,
 {
 	int ret;
 
-	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+	if (unlikely(ring->buffer->tail + bytes > ring->buffer->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
 	}
 
-	if (unlikely(ring->space < bytes)) {
+	if (unlikely(ring->buffer->space < bytes)) {
 		ret = ring_wait_for_space(ring, bytes);
 		if (unlikely(ret))
 			return ret;
@@ -1709,14 +1709,14 @@ int intel_ring_begin(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
-	ring->space -= num_dwords * sizeof(uint32_t);
+	ring->buffer->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 {
-	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2094,13 +2094,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 
-	ring->size = size;
-	ring->effective_size = ring->size;
+	ring->buffer->size = size;
+	ring->buffer->effective_size = ring->buffer->size;
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
-	ring->virtual_start = ioremap_wc(start, size);
-	if (ring->virtual_start == NULL) {
+	ring->buffer->virtual_start = ioremap_wc(start, size);
+	if (ring->buffer->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		ret = -ENOMEM;
@@ -2116,7 +2116,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	return 0;
 
 err_vstart:
-	iounmap(ring->virtual_start);
+	iounmap(ring->buffer->virtual_start);
 err_ringbuf:
 	kfree(ringbuf);
 	ring->buffer = NULL;