|
@@ -294,10 +294,87 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
|
|
|
vma->vm_private_data = NULL;
|
|
|
}
|
|
|
|
|
|
+/*
+ * ttm_bo_vm_access_kmap - copy between a kernel buffer and a BO backed by
+ * system pages (TTM_PL_SYSTEM / TTM_PL_TT placements).
+ *
+ * @bo:     buffer object to access
+ * @offset: byte offset into the BO; caller has already range-checked
+ *          offset + len against the BO size
+ * @buf:    kernel buffer to copy to (read) or from (write)
+ * @len:    number of bytes to copy, must be >= 1
+ * @write:  nonzero to copy @buf into the BO, zero to copy the BO into @buf
+ *
+ * Returns @len on success, or a negative errno from ttm_bo_kmap().
+ * Caller must hold the BO reservation.
+ */
+static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
+ unsigned long offset,
+ void *buf, int len, int write)
+{
+ unsigned long page = offset >> PAGE_SHIFT;
+ unsigned long bytes_left = len;
+ int ret;
+
+ /* Copy a page at a time, that way no extra virtual address
+ * mapping is needed
+ */
+ offset -= page << PAGE_SHIFT;
+ do {
+ /* Clamp this chunk to the remainder of the current page. */
+ unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
+ struct ttm_bo_kmap_obj map;
+ void *ptr;
+ bool is_iomem;
+
+ ret = ttm_bo_kmap(bo, page, 1, &map);
+ if (ret)
+ return ret;
+
+ ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
+ /* System/TT placements should never map as iomem; warn but proceed. */
+ WARN_ON_ONCE(is_iomem);
+ if (write)
+ memcpy(ptr, buf, bytes);
+ else
+ memcpy(buf, ptr, bytes);
+ ttm_bo_kunmap(&map);
+
+ page++;
+ bytes_left -= bytes;
+ /* Only the first page is copied from a non-zero intra-page offset. */
+ offset = 0;
+ } while (bytes_left);
+
+ return len;
+}
|
|
|
+
|
|
|
+/*
+ * ttm_bo_vm_access - vm_operations_struct .access handler for TTM BOs.
+ *
+ * Lets ptrace()/access_process_vm() read and write an mmapped buffer
+ * object.  Validates the range, reserves the BO, then dispatches on the
+ * current placement: system/TT pages are copied via kmap, anything else
+ * (e.g. VRAM) is delegated to the driver's access_memory hook.
+ *
+ * @vma:   the VMA the access goes through; vm_private_data is the BO
+ * @addr:  user virtual address of the access, within the VMA
+ * @buf:   kernel buffer to copy to/from
+ * @len:   number of bytes, must be >= 1
+ * @write: nonzero for a write into the BO, zero for a read
+ *
+ * Returns the number of bytes copied on success, or a negative errno.
+ */
+static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+			    void *buf, int len, int write)
+{
+	unsigned long offset = (addr) - vma->vm_start;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
+	int ret;
+
+	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+		return -EIO;
+
+	ret = ttm_bo_reserve(bo, true, false, NULL);
+	if (ret)
+		return ret;
+
+	switch (bo->mem.mem_type) {
+	case TTM_PL_SYSTEM:
+		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+			ret = ttm_tt_swapin(bo->ttm);
+			/* Bug fix: returning here directly leaked the
+			 * reservation taken above; drop it on the way out.
+			 */
+			if (unlikely(ret != 0))
+				goto out_unreserve;
+		}
+		/* fall through */
+	case TTM_PL_TT:
+		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
+		break;
+	default:
+		/* Other placements (e.g. VRAM) need driver help. */
+		if (bo->bdev->driver->access_memory)
+			ret = bo->bdev->driver->access_memory(
+				bo, offset, buf, len, write);
+		else
+			ret = -EIO;
+	}
+
+out_unreserve:
+	ttm_bo_unreserve(bo);
+
+	return ret;
+}
|
|
|
+
|
|
|
static const struct vm_operations_struct ttm_bo_vm_ops = {
.fault = ttm_bo_vm_fault,
.open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close
+ .close = ttm_bo_vm_close,
+ /* Enable ptrace()/access_process_vm() on mmapped BOs. */
+ .access = ttm_bo_vm_access
};
|
|
|
|
|
|
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
|