@@ -90,7 +90,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	}
 
 	/* get chunks */
-	chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
+	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		ret = -EFAULT;
@@ -110,7 +110,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
+		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				   sizeof(struct drm_amdgpu_cs_chunk))) {
 			ret = -EFAULT;
@@ -121,7 +121,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
+		cdata = u64_to_user_ptr(user_chunk.chunk_data);
 
 		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
 		if (p->chunks[i].kdata == NULL) {
@@ -1437,7 +1437,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
 	if (fences == NULL)
 		return -ENOMEM;
 
-	fences_user = (void __user *)(uintptr_t)(wait->in.fences);
+	fences_user = u64_to_user_ptr(wait->in.fences);
 	if (copy_from_user(fences, fences_user,
 			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
 		r = -EFAULT;
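
For reference, u64_to_user_ptr() (from include/linux/kernel.h) expands to the
same (void __user *)(uintptr_t) cast that is being replaced here, but it also
runs typecheck() on the argument, so passing anything other than a u64 raises a
compiler warning. At the time of this patch the helper looks roughly like this:

	/* sketch of the in-tree helper; see include/linux/kernel.h */
	#define u64_to_user_ptr(x) (		\
	{					\
		typecheck(u64, (x));		\
		(void __user *)(uintptr_t)(x);	\
	}					\
	)

Besides being shorter, the typecheck catches callers that accidentally feed a
narrower type into the conversion, which would silently truncate a 64-bit
pointer value coming from userspace on 32-bit kernels.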