@@ -39,6 +39,8 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/swiotlb.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
 #include <linux/debugfs.h>
 #include "radeon_reg.h"
 #include "radeon.h"
@@ -515,8 +517,92 @@ struct radeon_ttm_tt {
 	struct ttm_dma_tt		ttm;
 	struct radeon_device		*rdev;
 	u64				offset;
+
+	uint64_t			userptr;
+	struct mm_struct		*usermm;
+	uint32_t			userflags;
 };
 
+/* prepare the sg table with the user pages */
+static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	unsigned pinned = 0, nents;
+	int r;
+
+	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	if (current->mm != gtt->usermm)
+		return -EPERM;
+
+	do {
+		unsigned num_pages = ttm->num_pages - pinned;
+		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
+		struct page **pages = ttm->pages + pinned;
+
+		r = get_user_pages(current, current->mm, userptr, num_pages,
+				   write, 0, pages, NULL);
+		if (r < 0)
+			goto release_pages;
+
+		pinned += r;
+
+	} while (pinned < ttm->num_pages);
+
+	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
+				      ttm->num_pages << PAGE_SHIFT,
+				      GFP_KERNEL);
+	if (r)
+		goto release_sg;
+
+	r = -ENOMEM;
+	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+	if (nents != ttm->sg->nents)
+		goto release_sg;
+
+	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+					 gtt->ttm.dma_address, ttm->num_pages);
+
+	return 0;
+
+release_sg:
+	kfree(ttm->sg);
+
+release_pages:
+	release_pages(ttm->pages, pinned, 0);
+	return r;
+}
+
+static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	struct scatterlist *sg;
+	int i;
+
+	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	/* free the sg table and pages again */
+	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+
+	for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
+		struct page *page = sg_page(sg);
+
+		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
+			set_page_dirty(page);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+	}
+
+	sg_free_table(ttm->sg);
+}
+
 static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 				   struct ttm_mem_reg *bo_mem)
 {
@@ -525,6 +611,11 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 		RADEON_GART_PAGE_WRITE;
 	int r;
 
+	if (gtt->userptr) {
+		radeon_ttm_tt_pin_userptr(ttm);
+		flags &= ~RADEON_GART_PAGE_WRITE;
+	}
+
 	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -547,6 +638,10 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
 	struct radeon_ttm_tt *gtt = (void *)ttm;
 
 	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+
+	if (gtt->userptr)
+		radeon_ttm_tt_unpin_userptr(ttm);
+
 	return 0;
 }
 
@@ -603,6 +698,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
+	if (gtt->userptr) {
+		ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+		if (!ttm->sg)
+			return -ENOMEM;
+
+		ttm->page_flags |= TTM_PAGE_FLAG_SG;
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
 	if (slave && ttm->sg) {
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 						 gtt->ttm.dma_address, ttm->num_pages);
@@ -652,6 +757,12 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
+	if (gtt->userptr) {
+		kfree(ttm->sg);
+		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+		return;
+	}
+
 	if (slave)
 		return;
 
@@ -680,6 +791,40 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	ttm_pool_unpopulate(ttm);
 }
 
+int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+			      uint32_t flags)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return -EINVAL;
+
+	gtt->userptr = addr;
+	gtt->usermm = current->mm;
+	gtt->userflags = flags;
+	return 0;
+}
+
+bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return false;
+
+	return !!gtt->userptr;
+}
+
+bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return false;
+
+	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+}
+
 static struct ttm_bo_driver radeon_bo_driver = {
 	.ttm_tt_create = &radeon_ttm_tt_create,
 	.ttm_tt_populate = &radeon_ttm_tt_populate,