@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	struct drm_device *ddev = p->rdev->ddev;
 	struct radeon_cs_chunk *chunk;
 	struct radeon_cs_buckets buckets;
-	unsigned i, j;
-	bool duplicate, need_mmap_lock = false;
+	unsigned i;
+	bool need_mmap_lock = false;
 	int r;
 
-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		return 0;
 	}
-	chunk = &p->chunks[p->chunk_relocs_idx];
+	chunk = p->chunk_relocs;
 	p->dma_reloc_idx = 0;
 	/* FIXME: we assume that each relocs use 4 dwords */
 	p->nrelocs = chunk->length_dw / 4;
-	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
-	if (p->relocs_ptr == NULL) {
-		return -ENOMEM;
-	}
-	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
 	if (p->relocs == NULL) {
 		return -ENOMEM;
 	}
@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
 	for (i = 0; i < p->nrelocs; i++) {
 		struct drm_radeon_cs_reloc *r;
+		struct drm_gem_object *gobj;
 		unsigned priority;
 
-		duplicate = false;
 		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
-		for (j = 0; j < i; j++) {
-			if (r->handle == p->relocs[j].handle) {
-				p->relocs_ptr[i] = &p->relocs[j];
-				duplicate = true;
-				break;
-			}
-		}
-		if (duplicate) {
-			p->relocs[i].handle = 0;
-			continue;
-		}
-
-		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
-							  r->handle);
-		if (p->relocs[i].gobj == NULL) {
+		gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+		if (gobj == NULL) {
 			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
 			return -ENOENT;
 		}
-		p->relocs_ptr[i] = &p->relocs[i];
-		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+		p->relocs[i].robj = gem_to_radeon_bo(gobj);
 
 		/* The userspace buffer priorities are from 0 to 15. A higher
 		 * number means the buffer is more important.
@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
 		p->relocs[i].tv.shared = !r->write_domain;
-		p->relocs[i].handle = r->handle;
 
 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
 				      priority);
@@ -251,22 +232,20 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
 
 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-	int i, r = 0;
+	struct radeon_bo_list *reloc;
+	int r;
 
-	for (i = 0; i < p->nrelocs; i++) {
+	list_for_each_entry(reloc, &p->validated, tv.head) {
 		struct reservation_object *resv;
 
-		if (!p->relocs[i].robj)
-			continue;
-
-		resv = p->relocs[i].robj->tbo.resv;
+		resv = reloc->robj->tbo.resv;
 		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
-				     p->relocs[i].tv.shared);
+				     reloc->tv.shared);
 
 		if (r)
-			break;
+			return r;
 	}
-	return r;
+	return 0;
 }
 
 /* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -286,10 +265,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	p->idx = 0;
 	p->ib.sa_bo = NULL;
 	p->const_ib.sa_bo = NULL;
-	p->chunk_ib_idx = -1;
-	p->chunk_relocs_idx = -1;
-	p->chunk_flags_idx = -1;
-	p->chunk_const_ib_idx = -1;
+	p->chunk_ib = NULL;
+	p->chunk_relocs = NULL;
+	p->chunk_flags = NULL;
+	p->chunk_const_ib = NULL;
 	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
 	if (p->chunks_array == NULL) {
 		return -ENOMEM;
@@ -316,24 +295,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			return -EFAULT;
 		}
 		p->chunks[i].length_dw = user_chunk.length_dw;
-		p->chunks[i].chunk_id = user_chunk.chunk_id;
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
-			p->chunk_relocs_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
+			p->chunk_relocs = &p->chunks[i];
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
-			p->chunk_ib_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+			p->chunk_ib = &p->chunks[i];
 			/* zero length IB isn't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
-			p->chunk_const_ib_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+			p->chunk_const_ib = &p->chunks[i];
 			/* zero length CONST IB isn't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-			p->chunk_flags_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->chunk_flags = &p->chunks[i];
 			/* zero length flags aren't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
@@ -342,10 +320,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 		size = p->chunks[i].length_dw;
 		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
 			continue;
 
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
 			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
 				continue;
 		}
@@ -358,7 +336,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 			return -EFAULT;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
 			p->cs_flags = p->chunks[i].kdata[0];
 			if (p->chunks[i].length_dw > 1)
 				ring = p->chunks[i].kdata[1];
@@ -399,8 +377,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 static int cmp_size_smaller_first(void *priv, struct list_head *a,
 				  struct list_head *b)
 {
-	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
-	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
+	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
 	/* Sort A before B if A is smaller. */
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
@@ -441,13 +419,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
 
 	if (parser->relocs != NULL) {
 		for (i = 0; i < parser->nrelocs; i++) {
-			if (parser->relocs[i].gobj)
-				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+			struct radeon_bo *bo = parser->relocs[i].robj;
+			if (bo == NULL)
+				continue;
+
+			drm_gem_object_unreference_unlocked(&bo->gem_base);
 		}
 	}
 	kfree(parser->track);
 	kfree(parser->relocs);
-	kfree(parser->relocs_ptr);
 	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
@@ -462,7 +442,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 {
 	int r;
 
-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;
 
 	if (parser->cs_flags & RADEON_CS_USE_VM)
@@ -505,9 +485,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 	if (r)
 		return r;
 
-	radeon_sync_resv(p->rdev, &p->ib.sync, vm->page_directory->tbo.resv,
-			 true);
-
 	r = radeon_vm_clear_freed(rdev, vm);
 	if (r)
 		return r;
@@ -525,10 +502,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 	for (i = 0; i < p->nrelocs; i++) {
 		struct radeon_bo *bo;
 
-		/* ignore duplicates */
-		if (p->relocs_ptr[i] != &p->relocs[i])
-			continue;
-
 		bo = p->relocs[i].robj;
 		bo_va = radeon_vm_bo_find(vm, bo);
 		if (bo_va == NULL) {
@@ -553,7 +526,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	struct radeon_vm *vm = &fpriv->vm;
 	int r;
 
-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;
 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
 		return 0;
@@ -587,7 +560,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	}
 
 	if ((rdev->family >= CHIP_TAHITI) &&
-	    (parser->chunk_const_ib_idx != -1)) {
+	    (parser->chunk_const_ib != NULL)) {
 		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
 	} else {
 		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
@@ -614,7 +587,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
 	struct radeon_vm *vm = NULL;
 	int r;
 
-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;
 
 	if (parser->cs_flags & RADEON_CS_USE_VM) {
@@ -622,8 +595,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
 		vm = &fpriv->vm;
 
 		if ((rdev->family >= CHIP_TAHITI) &&
-		    (parser->chunk_const_ib_idx != -1)) {
-			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+		    (parser->chunk_const_ib != NULL)) {
+			ib_chunk = parser->chunk_const_ib;
 			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
 				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
 				return -EINVAL;
@@ -642,13 +615,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
 			return -EFAULT;
 		}
 
-		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+		ib_chunk = parser->chunk_ib;
 		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
 			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
 			return -EINVAL;
 		}
 	}
-	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	ib_chunk = parser->chunk_ib;
 
 	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
 			  vm, ib_chunk->length_dw * 4);
@@ -740,7 +713,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
 			   struct radeon_cs_packet *pkt,
 			   unsigned idx)
 {
-	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
 	struct radeon_device *rdev = p->rdev;
 	uint32_t header;
 
@@ -834,7 +807,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
  * GPU offset using the provided start.
  **/
 int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
-				struct radeon_cs_reloc **cs_reloc,
+				struct radeon_bo_list **cs_reloc,
 				int nomm)
 {
 	struct radeon_cs_chunk *relocs_chunk;
@@ -842,12 +815,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
 	unsigned idx;
 	int r;
 
-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		DRM_ERROR("No relocation chunk !\n");
 		return -EINVAL;
 	}
 	*cs_reloc = NULL;
-	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	relocs_chunk = p->chunk_relocs;
 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
 	if (r)
 		return r;
@@ -873,6 +846,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
 			(u64)relocs_chunk->kdata[idx + 3] << 32;
 		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
 	} else
-		*cs_reloc = p->relocs_ptr[(idx / 4)];
+		*cs_reloc = &p->relocs[(idx / 4)];
 	return 0;
 }
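
For readers unfamiliar with the kernel's intrusive lists, the heart of this patch is that radeon_cs_sync_rings() now walks the TTM validation list (p->validated) with list_for_each_entry() instead of indexing p->relocs[], which is what lets the NULL-robj filtering and the duplicate bookkeeping (relocs_ptr, handle) disappear. The standalone sketch below re-creates just that list mechanics in userspace; it is not part of the patch, and struct bo_list and the handle values are invented for illustration.

/* Minimal userspace sketch of the intrusive-list pattern used above.
 * Builds with: gcc -Wall sketch.c (uses the GNU __typeof__ extension). */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				  \
	for (pos = container_of((head)->next, __typeof__(*pos), member); \
	     &pos->member != (head);					  \
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

/* Stand-in for struct radeon_bo_list: the list node (cf. tv.head) is
 * embedded in the entry itself, so walking the list visits exactly the
 * buffers that were placed on it during validation. */
struct bo_list {
	unsigned handle;
	struct list_head head;
};

int main(void)
{
	struct list_head validated = { &validated, &validated };
	struct bo_list bos[3] = { { 10 }, { 20 }, { 30 } };
	struct bo_list *entry;
	unsigned i;

	for (i = 0; i < 3; i++)
		list_add_tail(&bos[i].head, &validated);

	/* Every entry on the list is valid by construction, so no
	 * "if (!entry->robj) continue;"-style filtering is needed. */
	list_for_each_entry(entry, &validated, head)
		printf("sync handle %u\n", entry->handle);

	return 0;
}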