@@ -29,6 +29,7 @@
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 #include "intel_drv.h"
 #include <linux/swap.h>
 #include <linux/pci.h>
@@ -48,11 +49,15 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
					     unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
 
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
 {
@@ -316,6 +321,45 @@ fail_unlock:
	return ret;
 }
 
+static inline gfp_t
+i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
+{
+	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+}
+
+static inline void
+i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
+{
+	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+}
+
+static int
+i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+{
+	int ret;
+
+	ret = i915_gem_object_get_pages(obj);
+
+	/* If we've insufficient memory to map in the pages, attempt
+	 * to make some space by throwing out some old buffers.
+	 */
+	if (ret == -ENOMEM) {
+		struct drm_device *dev = obj->dev;
+		gfp_t gfp;
+
+		ret = i915_gem_evict_something(dev, obj->size);
+		if (ret)
+			return ret;
+
+		gfp = i915_gem_object_get_page_gfp_mask(obj);
+		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+		ret = i915_gem_object_get_pages(obj);
+		i915_gem_object_set_page_gfp_mask (obj, gfp);
+	}
+
+	return ret;
+}
+
 /**
  * This is the fallback shmem pread path, which allocates temporary storage
  * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -367,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret != 0)
+	ret = i915_gem_object_get_pages_or_evict(obj);
+	if (ret)
		goto fail_unlock;
 
	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
@@ -842,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret != 0)
+	ret = i915_gem_object_get_pages_or_evict(obj);
+	if (ret)
		goto fail_unlock;
 
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
@@ -1155,28 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
-		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return VM_FAULT_SIGBUS;
-		}
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, write);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return VM_FAULT_SIGBUS;
-		}
+		ret = i915_gem_object_bind_to_gtt(obj, 0);
+		if (ret)
+			goto unlock;
 
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, write);
+		if (ret)
+			goto unlock;
	}
 
	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return VM_FAULT_SIGBUS;
-		}
+		if (ret)
+			goto unlock;
	}
 
	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
@@ -1184,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
-
+unlock:
	mutex_unlock(&dev->struct_mutex);
 
	switch (ret) {
+	case 0:
+	case -ERESTARTSYS:
+		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
-	case -EFAULT:
-	case -EINVAL:
-		return VM_FAULT_SIGBUS;
	default:
-		return VM_FAULT_NOPAGE;
+		return VM_FAULT_SIGBUS;
	}
 }
 
@@ -1388,6 +1426,14 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
	obj_priv = obj->driver_private;
 
+	if (obj_priv->madv != I915_MADV_WILLNEED) {
+		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+
	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
@@ -1399,22 +1445,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
	args->offset = obj_priv->mmap_offset;
 
-	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
-
-	/* Make sure the alignment is correct for fence regs etc */
-	if (obj_priv->agp_mem &&
-	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
-		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
@@ -1437,6 +1473,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
	int i;
 
	BUG_ON(obj_priv->pages_refcount == 0);
+	BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
 
	if (--obj_priv->pages_refcount != 0)
		return;
@@ -1444,13 +1481,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);
 
-	for (i = 0; i < page_count; i++)
-		if (obj_priv->pages[i] != NULL) {
-			if (obj_priv->dirty)
-				set_page_dirty(obj_priv->pages[i]);
+	if (obj_priv->madv == I915_MADV_DONTNEED)
+		obj_priv->dirty = 0;
+
+	for (i = 0; i < page_count; i++) {
+		if (obj_priv->pages[i] == NULL)
+			break;
+
+		if (obj_priv->dirty)
+			set_page_dirty(obj_priv->pages[i]);
+
+		if (obj_priv->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj_priv->pages[i]);
-			page_cache_release(obj_priv->pages[i]);
-		}
+
+		page_cache_release(obj_priv->pages[i]);
+	}
	obj_priv->dirty = 0;
 
	drm_free_large(obj_priv->pages);
@@ -1489,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
	obj_priv->last_rendering_seqno = 0;
 }
 
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct inode *inode;
+
+	inode = obj->filp->f_path.dentry->d_inode;
+	if (inode->i_op->truncate)
+		inode->i_op->truncate (inode);
+
+	obj_priv->madv = __I915_MADV_PURGED;
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+	return obj_priv->madv == I915_MADV_DONTNEED;
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 {
@@ -1577,15 +1642,24 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 
		if ((obj->write_domain & flush_domains) ==
		    obj->write_domain) {
+			uint32_t old_write_domain = obj->write_domain;
+
			obj->write_domain = 0;
			i915_gem_object_move_to_active(obj, seqno);
+
+			trace_i915_gem_object_change_domain(obj,
+							    obj->read_domains,
+							    old_write_domain);
		}
	}
 
	}
 
-	if (was_empty && !dev_priv->mm.suspended)
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+	if (!dev_priv->mm.suspended) {
+		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+		if (was_empty)
+			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+	}
	return seqno;
 }
 
@@ -1623,6 +1697,8 @@ i915_gem_retire_request(struct drm_device *dev,
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	trace_i915_gem_request_retire(dev, request->seqno);
+
	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
@@ -1671,7 +1747,7 @@ out:
 /**
  * Returns true if seq1 is later than seq2.
  */
-static int
+bool
 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 {
	return (int32_t)(seq1 - seq2) >= 0;
@@ -1709,7 +1785,7 @@ i915_gem_retire_requests(struct drm_device *dev)
		retiring_seqno = request->seqno;
 
		if (i915_seqno_passed(seqno, retiring_seqno) ||
-		    dev_priv->mm.wedged) {
+		    atomic_read(&dev_priv->mm.wedged)) {
			i915_gem_retire_request(dev, request);
 
			list_del(&request->list);
@@ -1751,6 +1827,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 
	BUG_ON(seqno == 0);
 
+	if (atomic_read(&dev_priv->mm.wedged))
+		return -EIO;
+
	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		if (IS_IGDNG(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1763,16 +1842,20 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
			i915_driver_irq_postinstall(dev);
		}
 
+		trace_i915_gem_request_wait_begin(dev, seqno);
+
		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
-					       dev_priv->mm.wedged);
+					       atomic_read(&dev_priv->mm.wedged));
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
+
+		trace_i915_gem_request_wait_end(dev, seqno);
	}
-	if (dev_priv->mm.wedged)
+	if (atomic_read(&dev_priv->mm.wedged))
		ret = -EIO;
 
	if (ret && ret != -ERESTARTSYS)
@@ -1803,6 +1886,8 @@ i915_gem_flush(struct drm_device *dev,
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
 #endif
+	trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+				     invalidate_domains, flush_domains);
 
	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);
@@ -1915,6 +2000,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
		return -EINVAL;
	}
 
+	/* blow away mappings if mapped through GTT */
+	i915_gem_release_mmap(obj);
+
+	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+		i915_gem_clear_fence_reg(obj);
+
	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
@@ -1928,21 +2019,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
		return ret;
	}
 
+	BUG_ON(obj_priv->active);
+
	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}
 
-	BUG_ON(obj_priv->active);
-
-	/* blow away mappings if mapped through GTT */
-	i915_gem_release_mmap(obj);
-
-	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-		i915_gem_clear_fence_reg(obj);
-
	i915_gem_object_put_pages(obj);
+	BUG_ON(obj_priv->pages_refcount);
 
	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
@@ -1956,40 +2042,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);
 
+	if (i915_gem_object_is_purgeable(obj_priv))
+		i915_gem_object_truncate(obj);
+
+	trace_i915_gem_object_unbind(obj);
+
	return 0;
 }
 
+static struct drm_gem_object *
+i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	struct drm_gem_object *best = NULL;
+	struct drm_gem_object *first = NULL;
+
+	/* Try to find the smallest clean object */
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		struct drm_gem_object *obj = obj_priv->obj;
+		if (obj->size >= min_size) {
+			if ((!obj_priv->dirty ||
+			     i915_gem_object_is_purgeable(obj_priv)) &&
+			    (!best || obj->size < best->size)) {
+				best = obj;
+				if (best->size == min_size)
+					return best;
+			}
+			if (!first)
+				first = obj;
+		}
+	}
+
+	return best ? best : first;
+}
+
 static int
-i915_gem_evict_something(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t seqno;
+	int ret;
+	bool lists_empty;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+		       list_empty(&dev_priv->mm.flushing_list) &&
+		       list_empty(&dev_priv->mm.active_list));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	if (lists_empty)
+		return -ENOSPC;
+
+	/* Flush everything (on to the inactive lists) and evict */
+	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+	if (seqno == 0)
+		return -ENOMEM;
+
+	ret = i915_wait_request(dev, seqno);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_evict_from_inactive_list(dev);
+	if (ret)
+		return ret;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+		       list_empty(&dev_priv->mm.flushing_list) &&
+		       list_empty(&dev_priv->mm.active_list));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+	BUG_ON(!lists_empty);
+
+	return 0;
+}
+
+static int
+i915_gem_evict_something(struct drm_device *dev, int min_size)
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-	int ret = 0;
+	int ret;
 
	for (;;) {
+		i915_gem_retire_requests(dev);
+
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
-		if (!list_empty(&dev_priv->mm.inactive_list)) {
-			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
-						    struct drm_i915_gem_object,
-						    list);
-			obj = obj_priv->obj;
-			BUG_ON(obj_priv->pin_count != 0);
+		obj = i915_gem_find_inactive_object(dev, min_size);
+		if (obj) {
+			struct drm_i915_gem_object *obj_priv;
+
 #if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
 #endif
+			obj_priv = obj->driver_private;
+			BUG_ON(obj_priv->pin_count != 0);
			BUG_ON(obj_priv->active);
 
			/* Wait on the rendering and unbind the buffer. */
-			ret = i915_gem_object_unbind(obj);
-			break;
+			return i915_gem_object_unbind(obj);
		}
 
		/* If we didn't get anything, but the ring is still processing
-		 * things, wait for one of those things to finish and hopefully
-		 * leave us a buffer to evict.
+		 * things, wait for the next to finish and hopefully leave us
+		 * a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;
@@ -2000,16 +2159,9 @@ i915_gem_evict_something(struct drm_device *dev)
 
			ret = i915_wait_request(dev, request->seqno);
			if (ret)
-				break;
+				return ret;
 
-			/* if waiting caused an object to become inactive,
-			 * then loop around and wait for it. Otherwise, we
-			 * assume that waiting freed and unbound something,
-			 * so there should now be some space in the GTT
-			 */
-			if (!list_empty(&dev_priv->mm.inactive_list))
-				continue;
-			break;
+			continue;
		}
 
		/* If we didn't have anything on the request list but there
@@ -2018,46 +2170,44 @@ i915_gem_evict_something(struct drm_device *dev)
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
-			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-						    struct drm_i915_gem_object,
-						    list);
-			obj = obj_priv->obj;
+			struct drm_i915_gem_object *obj_priv;
 
-			i915_gem_flush(dev,
-				       obj->write_domain,
-				       obj->write_domain);
-			i915_add_request(dev, NULL, obj->write_domain);
+			/* Find an object that we can immediately reuse */
+			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+				obj = obj_priv->obj;
+				if (obj->size >= min_size)
+					break;
 
-			obj = NULL;
-			continue;
-		}
+				obj = NULL;
+			}
 
-		DRM_ERROR("inactive empty %d request empty %d "
-			  "flushing empty %d\n",
-			  list_empty(&dev_priv->mm.inactive_list),
-			  list_empty(&dev_priv->mm.request_list),
-			  list_empty(&dev_priv->mm.flushing_list));
-		/* If we didn't do any of the above, there's nothing to be done
-		 * and we just can't fit it in.
-		 */
-		return -ENOSPC;
-	}
-	return ret;
-}
+			if (obj != NULL) {
+				uint32_t seqno;
 
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
-	int ret;
+				i915_gem_flush(dev,
+					       obj->write_domain,
+					       obj->write_domain);
+				seqno = i915_add_request(dev, NULL, obj->write_domain);
+				if (seqno == 0)
+					return -ENOMEM;
 
-	for (;;) {
-		ret = i915_gem_evict_something(dev);
-		if (ret != 0)
-			break;
+				ret = i915_wait_request(dev, seqno);
+				if (ret)
+					return ret;
+
+				continue;
+			}
+		}
+
+		/* If we didn't do any of the above, there's no single buffer
+		 * large enough to swap out for the new one, so just evict
+		 * everything and start again. (This should be rare.)
+		 */
+		if (!list_empty (&dev_priv->mm.inactive_list))
+			return i915_gem_evict_from_inactive_list(dev);
+		else
+			return i915_gem_evict_everything(dev);
	}
-	if (ret == -ENOSPC)
-		return 0;
-	return ret;
 }
@@ -2080,7 +2230,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
	if (obj_priv->pages == NULL) {
-		DRM_ERROR("Faled to allocate page list\n");
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}
@@ -2091,7 +2240,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
-			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_put_pages(obj);
			return ret;
		}
@@ -2328,6 +2476,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
	else
		i830_write_fence_reg(reg);
 
+	trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+
	return 0;
 }
 
@@ -2410,10 +2560,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
-	int page_count, ret;
+	bool retry_alloc = false;
+	int ret;
 
	if (dev_priv->mm.suspended)
		return -EBUSY;
+
+	if (obj_priv->madv != I915_MADV_WILLNEED) {
+		DRM_ERROR("Attempting to bind a purgeable object\n");
+		return -EINVAL;
+	}
+
	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
@@ -2433,30 +2590,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
		}
	}
	if (obj_priv->gtt_space == NULL) {
-		bool lists_empty;
-
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
 #if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
 #endif
-		spin_lock(&dev_priv->mm.active_list_lock);
-		lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-			       list_empty(&dev_priv->mm.flushing_list) &&
-			       list_empty(&dev_priv->mm.active_list));
-		spin_unlock(&dev_priv->mm.active_list_lock);
-		if (lists_empty) {
-			DRM_ERROR("GTT full, but LRU list empty\n");
-			return -ENOSPC;
-		}
-
-		ret = i915_gem_evict_something(dev);
-		if (ret != 0) {
-			if (ret != -ERESTARTSYS)
-				DRM_ERROR("Failed to evict a buffer %d\n", ret);
+		ret = i915_gem_evict_something(dev, obj->size);
+		if (ret)
			return ret;
-		}
+
		goto search_free;
	}
 
@@ -2464,27 +2607,56 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
	DRM_INFO("Binding object of size %zd at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
 #endif
+	if (retry_alloc) {
+		i915_gem_object_set_page_gfp_mask (obj,
+						   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
+	}
	ret = i915_gem_object_get_pages(obj);
+	if (retry_alloc) {
+		i915_gem_object_set_page_gfp_mask (obj,
+						   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
+	}
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
+
+		if (ret == -ENOMEM) {
+			/* first try to clear up some space from the GTT */
+			ret = i915_gem_evict_something(dev, obj->size);
+			if (ret) {
+				/* now try to shrink everyone else */
+				if (! retry_alloc) {
+					retry_alloc = true;
+					goto search_free;
+				}
+
+				return ret;
+			}
+
+			goto search_free;
+		}
+
		return ret;
	}
 
-	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
-					       page_count,
+					       obj->size >> PAGE_SHIFT,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
-		return -ENOMEM;
+
+		ret = i915_gem_evict_something(dev, obj->size);
+		if (ret)
+			return ret;
+
+		goto search_free;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);
@@ -2496,6 +2668,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
+	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+
	return 0;
 }
 
@@ -2511,15 +2685,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
	if (obj_priv->pages == NULL)
		return;
 
-	/* XXX: The 865 in particular appears to be weird in how it handles
-	 * cache flushing. We haven't figured it out, but the
-	 * clflush+agp_chipset_flush doesn't appear to successfully get the
-	 * data visible to the PGU, while wbinvd + agp_chipset_flush does.
-	 */
-	if (IS_I865G(obj->dev)) {
-		wbinvd();
-		return;
-	}
+	trace_i915_gem_object_clflush(obj);
 
	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
 }
@@ -2530,21 +2696,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
	uint32_t seqno;
+	uint32_t old_write_domain;
 
	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;
 
	/* Queue the GPU write cache flushing we need. */
+	old_write_domain = obj->write_domain;
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, NULL, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->read_domains,
+					    old_write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
 {
+	uint32_t old_write_domain;
+
	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;
 
@@ -2552,7 +2726,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 */
+	old_write_domain = obj->write_domain;
	obj->write_domain = 0;
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->read_domains,
+					    old_write_domain);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
@@ -2560,13 +2739,19 @@ static void
 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
+	uint32_t old_write_domain;
 
	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;
 
	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
+	old_write_domain = obj->write_domain;
	obj->write_domain = 0;
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->read_domains,
+					    old_write_domain);
 }
 
 /**
@@ -2579,6 +2764,7 @@ int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	uint32_t old_write_domain, old_read_domains;
	int ret;
 
	/* Not valid to be called on unbound objects. */
@@ -2591,6 +2777,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
	if (ret != 0)
		return ret;
 
+	old_write_domain = obj->write_domain;
+	old_read_domains = obj->read_domains;
+
	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
@@ -2609,6 +2798,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
		obj_priv->dirty = 1;
	}
 
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
	return 0;
 }
 
@@ -2621,6 +2814,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 static int
 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 {
+	uint32_t old_write_domain, old_read_domains;
	int ret;
 
	i915_gem_object_flush_gpu_write_domain(obj);
@@ -2636,6 +2830,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
+	old_write_domain = obj->write_domain;
+	old_read_domains = obj->read_domains;
+
	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);
@@ -2656,6 +2853,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}
 
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
	return 0;
 }
 
@@ -2777,6 +2978,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
	uint32_t			invalidate_domains = 0;
	uint32_t			flush_domains = 0;
+	uint32_t			old_read_domains;
 
	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
@@ -2823,6 +3025,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
		i915_gem_clflush_object(obj);
	}
 
+	old_read_domains = obj->read_domains;
+
	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
@@ -2841,6 +3045,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
 #endif
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    obj->write_domain);
 }
 
 /**
@@ -2893,6 +3101,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
			  uint64_t offset, uint64_t size)
 {
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	uint32_t old_read_domains;
	int i, ret;
 
	if (offset == 0 && size == obj->size)
@@ -2939,8 +3148,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
+	old_read_domains = obj->read_domains;
	obj->read_domains |= I915_GEM_DOMAIN_CPU;
 
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    obj->write_domain);
+
	return 0;
 }
 
@@ -2984,6 +3198,21 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
		}
		target_obj_priv = target_obj->driver_private;
 
+#if WATCH_RELOC
+		DRM_INFO("%s: obj %p offset %08x target %d "
+			 "read %08x write %08x gtt %08x "
+			 "presumed %08x delta %08x\n",
+			 __func__,
+			 obj,
+			 (int) reloc->offset,
+			 (int) reloc->target_handle,
+			 (int) reloc->read_domains,
+			 (int) reloc->write_domain,
+			 (int) target_obj_priv->gtt_offset,
+			 (int) reloc->presumed_offset,
+			 reloc->delta);
+#endif
+
		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
@@ -2995,25 +3224,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
			return -EINVAL;
		}
 
-		if (reloc->offset > obj->size - 4) {
-			DRM_ERROR("Relocation beyond object bounds: "
-				  "obj %p target %d offset %d size %d.\n",
-				  obj, reloc->target_handle,
-				  (int) reloc->offset, (int) obj->size);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
-		}
-		if (reloc->offset & 3) {
-			DRM_ERROR("Relocation not 4-byte aligned: "
-				  "obj %p target %d offset %d.\n",
-				  obj, reloc->target_handle,
-				  (int) reloc->offset);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
-		}
-
+		/* Validate that the target is in a valid r/w GPU domain */
		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
@@ -3027,7 +3238,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
-
		if (reloc->write_domain && target_obj->pending_write_domain &&
		    reloc->write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
@@ -3042,21 +3252,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
			return -EINVAL;
		}
 
-#if WATCH_RELOC
-		DRM_INFO("%s: obj %p offset %08x target %d "
-			 "read %08x write %08x gtt %08x "
-			 "presumed %08x delta %08x\n",
-			 __func__,
-			 obj,
-			 (int) reloc->offset,
-			 (int) reloc->target_handle,
-			 (int) reloc->read_domains,
-			 (int) reloc->write_domain,
-			 (int) target_obj_priv->gtt_offset,
-			 (int) reloc->presumed_offset,
-			 reloc->delta);
-#endif
-
		target_obj->pending_read_domains |= reloc->read_domains;
		target_obj->pending_write_domain |= reloc->write_domain;
 
@@ -3068,6 +3263,37 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
			continue;
		}
 
+		/* Check that the relocation address is valid... */
+		if (reloc->offset > obj->size - 4) {
+			DRM_ERROR("Relocation beyond object bounds: "
+				  "obj %p target %d offset %d size %d.\n",
+				  obj, reloc->target_handle,
+				  (int) reloc->offset, (int) obj->size);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+		if (reloc->offset & 3) {
+			DRM_ERROR("Relocation not 4-byte aligned: "
+				  "obj %p target %d offset %d.\n",
+				  obj, reloc->target_handle,
+				  (int) reloc->offset);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+
+		/* and points to somewhere within the target object. */
+		if (reloc->delta >= target_obj->size) {
+			DRM_ERROR("Relocation beyond target object bounds: "
+				  "obj %p target %d delta %d size %d.\n",
+				  obj, reloc->target_handle,
+				  (int) reloc->delta, (int) target_obj->size);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+
		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
@@ -3126,6 +3352,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;
 
+	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
+
	count = nbox ? nbox : 1;
 
	for (i = 0; i < count; i++) {
@@ -3363,7 +3591,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	if (dev_priv->mm.wedged) {
+	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
@@ -3421,8 +3649,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOSPC || pin_tries >= 1) {
-			if (ret != -ERESTARTSYS)
-				DRM_ERROR("Failed to pin buffers %d\n", ret);
+			if (ret != -ERESTARTSYS) {
+				unsigned long long total_size = 0;
+				for (i = 0; i < args->buffer_count; i++)
+					total_size += object_list[i]->size;
+				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+					  pinned+1, args->buffer_count,
+					  total_size, ret);
+				DRM_ERROR("%d objects [%d pinned], "
+					  "%d object bytes [%d pinned], "
+					  "%d/%d gtt bytes\n",
+					  atomic_read(&dev->object_count),
+					  atomic_read(&dev->pin_count),
+					  atomic_read(&dev->object_memory),
+					  atomic_read(&dev->pin_memory),
+					  atomic_read(&dev->gtt_memory),
+					  dev->gtt_total);
+			}
			goto err;
		}
 
@@ -3433,7 +3676,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
-		if (ret)
+		if (ret && ret != -ENOSPC)
			goto err;
	}
 
@@ -3489,8 +3732,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
+		uint32_t old_write_domain = obj->write_domain;
 
		obj->write_domain = obj->pending_write_domain;
+		trace_i915_gem_object_change_domain(obj,
+						    obj->read_domains,
+						    old_write_domain);
	}
 
	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3607,11 +3854,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
-		if (ret != 0) {
-			if (ret != -EBUSY && ret != -ERESTARTSYS)
-				DRM_ERROR("Failure to bind: %d\n", ret);
+		if (ret)
			return ret;
-		}
	}
	/*
	 * Pre-965 chips need a fence register set up in order to
@@ -3691,6 +3935,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
	}
	obj_priv = obj->driver_private;
 
+	if (obj_priv->madv != I915_MADV_WILLNEED) {
+		DRM_ERROR("Attempting to pin a purgeable buffer\n");
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
@@ -3803,6 +4054,56 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
	return i915_gem_ring_throttle(dev, file_priv);
 }
 
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_i915_gem_madvise *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	switch (args->madv) {
+	case I915_MADV_DONTNEED:
+	case I915_MADV_WILLNEED:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL) {
+		DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
+			  args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	obj_priv = obj->driver_private;
+
+	if (obj_priv->pin_count) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+
+		DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
+		return -EINVAL;
+	}
+
+	if (obj_priv->madv != __I915_MADV_PURGED)
+		obj_priv->madv = args->madv;
+
+	/* if the object is no longer bound, discard its backing storage */
+	if (i915_gem_object_is_purgeable(obj_priv) &&
+	    obj_priv->gtt_space == NULL)
+		i915_gem_object_truncate(obj);
+
+	args->retained = obj_priv->madv != __I915_MADV_PURGED;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 int i915_gem_init_object(struct drm_gem_object *obj)
 {
	struct drm_i915_gem_object *obj_priv;
@@ -3827,6 +4128,9 @@ int i915_gem_init_object(struct drm_gem_object *obj)
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj_priv->list);
	INIT_LIST_HEAD(&obj_priv->fence_list);
+	obj_priv->madv = I915_MADV_WILLNEED;
+
+	trace_i915_gem_object_create(obj);
 
	return 0;
 }
@@ -3836,6 +4140,8 @@ void i915_gem_free_object(struct drm_gem_object *obj)
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
+	trace_i915_gem_object_destroy(obj);
+
	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);
 
@@ -3844,43 +4150,35 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
-	i915_gem_free_mmap_offset(obj);
+	if (obj_priv->mmap_offset)
+		i915_gem_free_mmap_offset(obj);
 
 	kfree(obj_priv->page_cpu_valid);
 	kfree(obj_priv->bit_17);
 	kfree(obj->driver_private);
 }
 
-/** Unbinds all objects that are on the given buffer list. */
+/** Unbinds all inactive objects. */
 static int
-i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+i915_gem_evict_from_inactive_list(struct drm_device *dev)
 {
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-	int ret;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	while (!list_empty(head)) {
-		obj_priv = list_first_entry(head,
-					    struct drm_i915_gem_object,
-					    list);
-		obj = obj_priv->obj;
+	while (!list_empty(&dev_priv->mm.inactive_list)) {
+		struct drm_gem_object *obj;
+		int ret;
 
-		if (obj_priv->pin_count != 0) {
-			DRM_ERROR("Pinned object in unbind list\n");
-			mutex_unlock(&dev->struct_mutex);
-			return -EINVAL;
-		}
+		obj = list_first_entry(&dev_priv->mm.inactive_list,
+				       struct drm_i915_gem_object,
+				       list)->obj;
 
 		ret = i915_gem_object_unbind(obj);
 		if (ret != 0) {
-			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
-				  ret);
-			mutex_unlock(&dev->struct_mutex);
+			DRM_ERROR("Error unbinding object: %d\n", ret);
 			return ret;
 		}
 	}
 
-
 	return 0;
 }
 
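Note: the rewritten eviction loop above takes the next victim with `list_first_entry(&dev_priv->mm.inactive_list, struct drm_i915_gem_object, list)->obj`, i.e. the usual intrusive-list idiom: the `list_head` is embedded in the private object and `container_of()` arithmetic recovers the embedding structure. A stand-alone sketch of that idiom, using local stand-ins for the kernel list helpers (illustration only, not part of the patch):

/* Stand-alone sketch (not part of the patch).  struct node, struct buffer
 * and the macros below are local stand-ins for the kernel's struct
 * list_head, container_of() and list_first_entry(). */
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next, *prev;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct buffer {
	int id;
	struct node link;	/* embedded node, like obj_priv->list */
};

static void list_init(struct node *head)
{
	head->next = head->prev = head;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct node inactive;
	struct buffer a = { .id = 1 }, b = { .id = 2 };
	struct buffer *victim;

	list_init(&inactive);
	list_add_tail(&a.link, &inactive);
	list_add_tail(&b.link, &inactive);

	/* Same shape as:
	 *   list_first_entry(&dev_priv->mm.inactive_list,
	 *                    struct drm_i915_gem_object, list)->obj
	 */
	victim = first_entry(&inactive, struct buffer, link);
	printf("first victim: %d\n", victim->id);	/* prints 1 */
	return 0;
}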
@@ -3902,6 +4200,7 @@ i915_gem_idle(struct drm_device *dev)
 	 * We need to replace this with a semaphore, or something.
 	 */
 	dev_priv->mm.suspended = 1;
+	del_timer(&dev_priv->hangcheck_timer);
 
 	/* Cancel the retire work handler, wait for it to finish if running
 	 */
@@ -3931,7 +4230,7 @@ i915_gem_idle(struct drm_device *dev)
 		if (last_seqno == cur_seqno) {
 			if (stuck++ > 100) {
 				DRM_ERROR("hardware wedged\n");
-				dev_priv->mm.wedged = 1;
+				atomic_set(&dev_priv->mm.wedged, 1);
 				DRM_WAKEUP(&dev_priv->irq_queue);
 				break;
 			}
@@ -3944,7 +4243,7 @@ i915_gem_idle(struct drm_device *dev)
 	i915_gem_retire_requests(dev);
 
 	spin_lock(&dev_priv->mm.active_list_lock);
-	if (!dev_priv->mm.wedged) {
+	if (!atomic_read(&dev_priv->mm.wedged)) {
 		/* Active and flushing should now be empty as we've
 		 * waited for a sequence higher than any pending execbuffer
 		 */
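Note: `mm.wedged` becomes an `atomic_t` in the hunks above and below, presumably because the flag is now also set outside the submitting process's context by the hang-detection path this series adds, so every access goes through `atomic_read()`/`atomic_set()`. A rough userspace analogy using C11 atomics rather than the kernel API (illustration only, not part of the patch):

/* Userspace analogy only (not part of the patch): C11 stdatomic stands in
 * for the kernel's atomic_t, atomic_set() and atomic_read(). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int wedged;		/* like dev_priv->mm.wedged */

static void *hangcheck(void *arg)
{
	(void)arg;
	/* ... decide the hardware is stuck ... */
	atomic_store(&wedged, 1);	/* kernel: atomic_set(&wedged, 1) */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, hangcheck, NULL);
	pthread_join(t, NULL);

	if (atomic_load(&wedged))	/* kernel: atomic_read(&wedged) */
		printf("refusing to submit: hardware wedged\n");
	return 0;
}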
@@ -3962,29 +4261,41 @@ i915_gem_idle(struct drm_device *dev)
 	 * the GPU domains and just stuff them onto inactive.
 	 */
 	while (!list_empty(&dev_priv->mm.active_list)) {
-		struct drm_i915_gem_object *obj_priv;
+		struct drm_gem_object *obj;
+		uint32_t old_write_domain;
 
-		obj_priv = list_first_entry(&dev_priv->mm.active_list,
-					    struct drm_i915_gem_object,
-					    list);
-		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-		i915_gem_object_move_to_inactive(obj_priv->obj);
+		obj = list_first_entry(&dev_priv->mm.active_list,
+				       struct drm_i915_gem_object,
+				       list)->obj;
+		old_write_domain = obj->write_domain;
+		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+		i915_gem_object_move_to_inactive(obj);
+
+		trace_i915_gem_object_change_domain(obj,
+						    obj->read_domains,
+						    old_write_domain);
 	}
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		struct drm_i915_gem_object *obj_priv;
+		struct drm_gem_object *obj;
+		uint32_t old_write_domain;
 
-		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-					    struct drm_i915_gem_object,
-					    list);
-		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-		i915_gem_object_move_to_inactive(obj_priv->obj);
+		obj = list_first_entry(&dev_priv->mm.flushing_list,
+				       struct drm_i915_gem_object,
+				       list)->obj;
+		old_write_domain = obj->write_domain;
+		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+		i915_gem_object_move_to_inactive(obj);
+
+		trace_i915_gem_object_change_domain(obj,
+						    obj->read_domains,
+						    old_write_domain);
 	}
 
 	/* Move all inactive buffers out of the GTT. */
-	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+	ret = i915_gem_evict_from_inactive_list(dev);
 	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
@@ -4206,9 +4517,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
 
-	if (dev_priv->mm.wedged) {
+	if (atomic_read(&dev_priv->mm.wedged)) {
 		DRM_ERROR("Reenabling wedged hardware, good luck\n");
-		dev_priv->mm.wedged = 0;
+		atomic_set(&dev_priv->mm.wedged, 0);
 	}
 
 	mutex_lock(&dev->struct_mutex);
@@ -4274,6 +4585,10 @@ i915_gem_load(struct drm_device *dev)
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
 
+	spin_lock(&shrink_list_lock);
+	list_add(&dev_priv->mm.shrink_list, &shrink_list);
+	spin_unlock(&shrink_list_lock);
+
 	/* Old X drivers will take 0-2 for front, back, depth buffers */
 	dev_priv->fence_reg_start = 3;
 
@@ -4491,3 +4806,116 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
 		list_del_init(i915_file_priv->mm.request_list.next);
 	mutex_unlock(&dev->struct_mutex);
 }
+
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+	drm_i915_private_t *dev_priv, *next_dev;
+	struct drm_i915_gem_object *obj_priv, *next_obj;
+	int cnt = 0;
+	int would_deadlock = 1;
+
+	/* "fast-path" to count number of available objects */
+	if (nr_to_scan == 0) {
+		spin_lock(&shrink_list_lock);
+		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+			struct drm_device *dev = dev_priv->dev;
+
+			if (mutex_trylock(&dev->struct_mutex)) {
+				list_for_each_entry(obj_priv,
+						    &dev_priv->mm.inactive_list,
+						    list)
+					cnt++;
+				mutex_unlock(&dev->struct_mutex);
+			}
+		}
+		spin_unlock(&shrink_list_lock);
+
+		return (cnt / 100) * sysctl_vfs_cache_pressure;
+	}
+
+	spin_lock(&shrink_list_lock);
+
+	/* first scan for clean buffers */
+	list_for_each_entry_safe(dev_priv, next_dev,
+				 &shrink_list, mm.shrink_list) {
+		struct drm_device *dev = dev_priv->dev;
+
+		if (! mutex_trylock(&dev->struct_mutex))
+			continue;
+
+		spin_unlock(&shrink_list_lock);
+
+		i915_gem_retire_requests(dev);
+
+		list_for_each_entry_safe(obj_priv, next_obj,
+					 &dev_priv->mm.inactive_list,
+					 list) {
+			if (i915_gem_object_is_purgeable(obj_priv)) {
+				i915_gem_object_unbind(obj_priv->obj);
+				if (--nr_to_scan <= 0)
+					break;
+			}
+		}
+
+		spin_lock(&shrink_list_lock);
+		mutex_unlock(&dev->struct_mutex);
+
+		would_deadlock = 0;
+
+		if (nr_to_scan <= 0)
+			break;
+	}
+
+	/* second pass, evict/count anything still on the inactive list */
+	list_for_each_entry_safe(dev_priv, next_dev,
+				 &shrink_list, mm.shrink_list) {
+		struct drm_device *dev = dev_priv->dev;
+
+		if (! mutex_trylock(&dev->struct_mutex))
+			continue;
+
+		spin_unlock(&shrink_list_lock);
+
+		list_for_each_entry_safe(obj_priv, next_obj,
+					 &dev_priv->mm.inactive_list,
+					 list) {
+			if (nr_to_scan > 0) {
+				i915_gem_object_unbind(obj_priv->obj);
+				nr_to_scan--;
+			} else
+				cnt++;
+		}
+
+		spin_lock(&shrink_list_lock);
+		mutex_unlock(&dev->struct_mutex);
+
+		would_deadlock = 0;
+	}
+
+	spin_unlock(&shrink_list_lock);
+
+	if (would_deadlock)
+		return -1;
+	else if (cnt > 0)
+		return (cnt / 100) * sysctl_vfs_cache_pressure;
+	else
+		return 0;
+}
+
+static struct shrinker shrinker = {
+	.shrink = i915_gem_shrink,
+	.seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+	register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+	unregister_shrinker(&shrinker);
+}
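
Note: `i915_gem_shrink()` above follows the old single-callback shrinker contract: the VM calls `->shrink(0, gfp_mask)` to ask how many objects could be freed, then `->shrink(nr_to_scan, gfp_mask)` to actually reclaim, and treats a return of -1 as "could not take the locks, try again later" (hence `would_deadlock`). A self-contained mock of that call pattern, with local stand-ins rather than the real `struct shrinker` (illustration only, not part of the patch):

/* Mock of the old single-callback shrinker contract, for illustration.
 * struct mock_shrinker and vm_pressure() are local stand-ins, not kernel API. */
#include <stdio.h>

typedef int (*shrink_fn)(int nr_to_scan, unsigned int gfp_mask);

struct mock_shrinker {
	shrink_fn shrink;
	int seeks;
};

static int freeable = 250;	/* pretend count of purgeable objects */

static int mock_i915_shrink(int nr_to_scan, unsigned int gfp_mask)
{
	(void)gfp_mask;
	if (nr_to_scan == 0)		/* query pass: report count only */
		return freeable;
	if (nr_to_scan > freeable)	/* reclaim pass */
		nr_to_scan = freeable;
	freeable -= nr_to_scan;
	return freeable;		/* -1 would mean "try again later" */
}

/* Rough shape of what the VM's shrink_slab() loop does with a shrinker. */
static void vm_pressure(struct mock_shrinker *s, int scan_batch)
{
	int count = s->shrink(0, 0);

	printf("shrinker reports %d freeable objects\n", count);
	while (count > 0) {
		int left = s->shrink(scan_batch, 0);
		if (left < 0)		/* would deadlock: back off */
			break;
		count = left;
	}
	printf("%d objects left after reclaim\n", count);
}

int main(void)
{
	struct mock_shrinker s = { .shrink = mock_i915_shrink, .seeks = 2 };

	vm_pressure(&s, 128);
	return 0;
}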
|