@@ -659,8 +659,6 @@ static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
 static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
 static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
 static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
-static int gfx_v8_0_compute_mqd_sw_init(struct amdgpu_device *adev);
-static void gfx_v8_0_compute_mqd_sw_fini(struct amdgpu_device *adev);
 
 static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
@@ -2102,7 +2100,7 @@ static int gfx_v8_0_sw_init(void *handle)
 		return r;
 
 	/* create MQD for all compute queues as well as KIQ for SRIOV case */
-	r = gfx_v8_0_compute_mqd_sw_init(adev);
+	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct vi_mqd));
 	if (r)
 		return r;
 
@@ -2148,7 +2146,7 @@ static int gfx_v8_0_sw_fini(void *handle)
 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
-	gfx_v8_0_compute_mqd_sw_fini(adev);
+	amdgpu_gfx_compute_mqd_sw_fini(adev);
 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
 	amdgpu_gfx_kiq_fini(adev);
 
@@ -7146,68 +7144,3 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
 	amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
 }
-
-/* create MQD for each compute queue */
-static int gfx_v8_0_compute_mqd_sw_init(struct amdgpu_device *adev)
-{
-	struct amdgpu_ring *ring = NULL;
-	int r, i;
-
-	/* create MQD for KIQ */
-	ring = &adev->gfx.kiq.ring;
-	if (!ring->mqd_obj) {
-		r = amdgpu_bo_create_kernel(adev, sizeof(struct vi_mqd), PAGE_SIZE,
-					    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
-					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
-		if (r) {
-			dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
-			return r;
-		}
-
-		/* prepare MQD backup */
-		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(sizeof(struct vi_mqd), GFP_KERNEL);
-		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
-			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
-	}
-
-	/* create MQD for each KCQ */
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		ring = &adev->gfx.compute_ring[i];
-		if (!ring->mqd_obj) {
-			r = amdgpu_bo_create_kernel(adev, sizeof(struct vi_mqd), PAGE_SIZE,
-						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
-						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
-			if (r) {
-				dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
-				return r;
-			}
-
-			/* prepare MQD backup */
-			adev->gfx.mec.mqd_backup[i] = kmalloc(sizeof(struct vi_mqd), GFP_KERNEL);
-			if (!adev->gfx.mec.mqd_backup[i])
-				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
-		}
-	}
-
-	return 0;
-}
-
-static void gfx_v8_0_compute_mqd_sw_fini(struct amdgpu_device *adev)
-{
-	struct amdgpu_ring *ring = NULL;
-	int i;
-
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		ring = &adev->gfx.compute_ring[i];
-		kfree(adev->gfx.mec.mqd_backup[i]);
-		amdgpu_bo_free_kernel(&ring->mqd_obj,
-				      &ring->mqd_gpu_addr,
-				      &ring->mqd_ptr);
-	}
-
-	ring = &adev->gfx.kiq.ring;
-	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
-	amdgpu_bo_free_kernel(&ring->mqd_obj,
-			      &ring->mqd_gpu_addr,
-			      &ring->mqd_ptr);
-}
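
Note for reviewers, not part of the patch: this diff shows only the gfx_v8_0 side. Below is a minimal sketch of the common-code counterparts assumed to land in amdgpu_gfx.c, on the assumption that the removed bodies above were moved largely verbatim, with the ASIC-specific sizeof(struct vi_mqd) hoisted into a size parameter. Only the two call sites above are confirmed by this diff; the mqd_size parameter name is an assumption, and the dev_warn error paths are elided here for brevity.

/* Sketch of the assumed common helpers, derived from the removed
 * gfx_v8_0 code with the MQD size passed in by the caller. */
int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
				   unsigned mqd_size)
{
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	int r, i;

	/* create MQD for KIQ; the backup slot past the per-KCQ entries
	 * (index AMDGPU_MAX_COMPUTE_RINGS) is reserved for it */
	if (!ring->mqd_obj) {
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r)
			return r;

		/* backup allocation failure is non-fatal; the removed code only warned */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] =
			kmalloc(mqd_size, GFP_KERNEL);
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r)
				return r;

			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
		}
	}

	return 0;
}

void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	/* free per-KCQ MQDs and backups, then the KIQ slot */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

Passing only the MQD size keeps the helper ASIC-agnostic: the BO creation, GTT placement, and backup bookkeeping are identical across generations, so a later gfx block would presumably call it with its own MQD struct size instead of duplicating this boilerplate per ASIC.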