@@ -221,31 +221,32 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-	unsigned size;
-	void *ptr;
-	const struct common_firmware_header *hdr;
-	int i;
+	struct amdgpu_ring *ring = &adev->uvd.ring;
+	int i, r;
 
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			break;
-
-	if (i == AMDGPU_MAX_UVD_HANDLES)
-		return 0;
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+		if (handle != 0) {
+			struct fence *fence;
 
-	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+			amdgpu_uvd_note_usage(adev);
 
-	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	size -= le32_to_cpu(hdr->ucode_size_bytes);
+			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD (%d)!\n", r);
+				continue;
+			}
 
-	ptr = adev->uvd.cpu_addr;
-	ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			fence_wait(fence, false);
+			fence_put(fence);
 
-	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-	memcpy(adev->uvd.saved_bo, ptr, size);
+			adev->uvd.filp[i] = NULL;
+			atomic_set(&adev->uvd.handles[i], 0);
+		}
+	}
 
 	return 0;
 }
@@ -270,12 +271,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 	ptr = adev->uvd.cpu_addr;
 	ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-	if (adev->uvd.saved_bo != NULL) {
-		memcpy(ptr, adev->uvd.saved_bo, size);
-		kfree(adev->uvd.saved_bo);
-		adev->uvd.saved_bo = NULL;
-	} else
-		memset(ptr, 0, size);
+	memset(ptr, 0, size);
 
 	return 0;
 }
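
For reference, this is how amdgpu_uvd_suspend() reads once the first hunk is applied: instead of saving the firmware BO contents, it now walks the session handle table and submits a destroy message for each open session. The function below is reconstructed from the hunks above; the comments are editorial annotations, not part of the patch.

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	/* Nothing to tear down if the UVD firmware BO was never created. */
	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			/* Submit a destroy message for the open session. */
			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			/* Block until the destroy message has executed. */
			fence_wait(fence, false);
			fence_put(fence);

			/* Mark the session slot as free. */
			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}

	return 0;
}

Since no session state survives suspend, the second hunk can drop the saved_bo restore path in amdgpu_uvd_resume() and unconditionally clear the region after the ucode.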