@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file);

 static void i915_gem_write_fence(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }

+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
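+	/* For a WILLNEED object the contents must be preserved: copy them
+	 * back into the shmemfs backing store before releasing the
+	 * contiguous allocation. Otherwise the contents may be discarded.
+	 */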
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
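+	/* A handle, once allocated, is fixed in place; it can only be
+	 * reused if it already meets the requested alignment.
+	 */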
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
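+	/* The buffer is read by the hardware without snooping the CPU
+	 * cache, so on x86 remap it as write-combining to keep CPU writes
+	 * out of the cache hierarchy.
+	 */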
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
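+	/* Try a non-faulting, non-temporal copy first while holding
+	 * struct_mutex; if any bytes remain uncopied (e.g. the user pages
+	 * are not resident), drop the lock and retry with copy_from_user,
+	 * which is allowed to fault.
+	 */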
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}

@@ -3208,12 +3326,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   unsigned flags)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
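+	/* PIN_OFFSET_BIAS carries a minimum offset for the binding in its
+	 * PIN_OFFSET_MASK bits; use it as the lower bound of the search
+	 * range so the object is never placed below the requested bias.
+	 */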
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size > gtt_max) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
 			  obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  gtt_max);
+			  end);
 		return ERR_PTR(-E2BIG);
 	}

@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
-						  obj->cache_level, 0, gtt_max,
+						  obj->cache_level,
+						  start, end,
 						  DRM_MM_SEARCH_DEFAULT,
 						  DRM_MM_CREATE_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level, flags);
+					       obj->cache_level,
+					       start, end,
+					       flags);
 		if (ret == 0)
 			goto search_free;

@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }

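+/*
+ * Check whether an existing binding of the vma already satisfies the
+ * constraints of a pin request: the required alignment, mappability
+ * (PIN_MAPPABLE) and minimum offset (PIN_OFFSET_BIAS). A misplaced vma
+ * must be unbound and rebound before it can be pinned.
+ */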
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t alignment,
-		    unsigned flags)
+		    uint64_t flags)
 {
 	struct i915_vma *vma;
 	int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 		return -EBUSY;

-	if ((alignment &&
-	     vma->node.start & (alignment - 1)) ||
-	    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+	if (i915_vma_misplaced(vma, alignment, flags)) {
 		WARN(vma->pin_count,
 		     "bo is already pinned with incorrect alignment:"
 		     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 		     " obj->map_and_fenceable=%d\n",
 		     i915_gem_obj_offset(obj, vm), alignment,
-		     flags & PIN_MAPPABLE,
+		     !!(flags & PIN_MAPPABLE),
 		     obj->map_and_fenceable);
 		ret = i915_vma_unbind(vma);
 		if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)

 	trace_i915_gem_object_destroy(obj);

-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		int ret;

@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}

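+	/* All VMAs are unbound; release the contiguous phys backing,
+	 * copying its contents back for a WILLNEED object.
+	 */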
+	i915_gem_object_detach_phys(obj);
+
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }

-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;