@@ -1807,10 +1807,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
 
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
-	if (adev->mman.buffer_funcs == NULL) {
-		adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
-		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
-	}
+	adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
+	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
 
 static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
@@ -1826,15 +1824,13 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
-	if (adev->vm_manager.vm_pte_funcs == NULL) {
-		adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
-		for (i = 0; i < adev->sdma.num_instances; i++) {
-			sched = &adev->sdma.instance[i].ring.sched;
-			adev->vm_manager.vm_pte_rqs[i] =
-				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		}
-		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		sched = &adev->sdma.instance[i].ring.sched;
+		adev->vm_manager.vm_pte_rqs[i] =
+			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	}
+	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {