@@ -184,7 +184,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
 	char fw_name[30];
-	int err, i;
+	int err = 0, i;
 	struct amdgpu_firmware_info *info = NULL;
 	const struct common_firmware_header *header = NULL;
 	const struct sdma_firmware_header_v1_0 *hdr;
@@ -204,27 +204,27 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 	default: BUG();
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		if (i == 0)
 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
 		else
 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
-		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 		if (err)
 			goto out;
-		err = amdgpu_ucode_validate(adev->sdma[i].fw);
+		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 		if (err)
 			goto out;
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-		if (adev->sdma[i].feature_version >= 20)
-			adev->sdma[i].burst_nop = true;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma.instance[i].feature_version >= 20)
+			adev->sdma.instance[i].burst_nop = true;
 
 		if (adev->firmware.smu_load) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
 			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
-			info->fw = adev->sdma[i].fw;
+			info->fw = adev->sdma.instance[i].fw;
 			header = (const struct common_firmware_header *)info->fw->data;
 			adev->firmware.fw_size +=
 				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@@ -235,9 +235,9 @@ out:
 		printk(KERN_ERR
 		       "sdma_v3_0: Failed to load firmware \"%s\"\n",
 		       fw_name);
-		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-			release_firmware(adev->sdma[i].fw);
-			adev->sdma[i].fw = NULL;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			release_firmware(adev->sdma.instance[i].fw);
+			adev->sdma.instance[i].fw = NULL;
 		}
 	}
 	return err;
@@ -276,7 +276,7 @@ static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 		/* XXX check if swapping is necessary on BE */
 		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
 	} else {
-		int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
 		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
 	}
@@ -300,7 +300,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 		adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
 		WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
 	} else {
-		int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 	}
@@ -308,7 +308,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -369,7 +369,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask = 0;
 
-	if (ring == &ring->adev->sdma[0].ring)
+	if (ring == &ring->adev->sdma.instance[0].ring)
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
 	else
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -454,8 +454,8 @@ static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
@@ -463,7 +463,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 	    (adev->mman.buffer_funcs_ring == sdma1))
 		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -500,7 +500,7 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 	u32 f32_cntl;
 	int i;
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
 		if (enable)
 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
@@ -530,7 +530,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
 		sdma_v3_0_rlc_stop(adev);
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 		if (enable)
 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@@ -557,8 +557,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 	u32 doorbell;
 	int i, j, r;
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		ring = &adev->sdma[i].ring;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		wb_offset = (ring->rptr_offs * 4);
 
 		mutex_lock(&adev->srbm_mutex);
@@ -669,23 +669,22 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
 	u32 fw_size;
 	int i, j;
 
-	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-		return -EINVAL;
-
 	/* halt the MEs */
 	sdma_v3_0_enable(adev, false);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (!adev->sdma.instance[i].fw)
+			return -EINVAL;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 		fw_data = (const __le32 *)
-			(adev->sdma[i].fw->data +
+			(adev->sdma.instance[i].fw->data +
 			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
 		for (j = 0; j < fw_size; j++)
 			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 	}
 
 	return 0;
@@ -701,21 +700,21 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
  */
 static int sdma_v3_0_start(struct amdgpu_device *adev)
 {
-	int r;
+	int r, i;
 
 	if (!adev->firmware.smu_load) {
 		r = sdma_v3_0_load_microcode(adev);
 		if (r)
 			return r;
 	} else {
-		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-						AMDGPU_UCODE_ID_SDMA0);
-		if (r)
-			return -EINVAL;
-		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-						AMDGPU_UCODE_ID_SDMA1);
-		if (r)
-			return -EINVAL;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+									 (i == 0) ?
+									 AMDGPU_UCODE_ID_SDMA0 :
+									 AMDGPU_UCODE_ID_SDMA1);
+			if (r)
+				return -EINVAL;
+		}
 	}
 
 	/* unhalt the MEs */
@@ -1013,7 +1012,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
 	u32 pad_count;
 	int i;
 
@@ -1071,6 +1070,12 @@ static int sdma_v3_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	switch (adev->asic_type) {
+	default:
+		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+		break;
+	}
+
 	sdma_v3_0_set_ring_funcs(adev);
 	sdma_v3_0_set_buffer_funcs(adev);
 	sdma_v3_0_set_vm_pte_funcs(adev);
@@ -1082,21 +1087,21 @@ static int sdma_v3_0_early_init(void *handle)
 static int sdma_v3_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
-	int r;
+	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
@@ -1106,33 +1111,23 @@ static int sdma_v3_0_sw_init(void *handle)
 		return r;
 	}
 
-	ring = &adev->sdma[0].ring;
-	ring->ring_obj = NULL;
-	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0;
-
-	ring = &adev->sdma[1].ring;
-	ring->ring_obj = NULL;
-	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1;
-
-	ring = &adev->sdma[0].ring;
-	sprintf(ring->name, "sdma0");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-			     AMDGPU_RING_TYPE_SDMA);
-	if (r)
-		return r;
-
-	ring = &adev->sdma[1].ring;
-	sprintf(ring->name, "sdma1");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-			     AMDGPU_RING_TYPE_SDMA);
-	if (r)
-		return r;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		ring->ring_obj = NULL;
+		ring->use_doorbell = true;
+		ring->doorbell_index = (i == 0) ?
+			AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+
+		sprintf(ring->name, "sdma%d", i);
+		r = amdgpu_ring_init(adev, ring, 256 * 1024,
+				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+				     &adev->sdma.trap_irq,
+				     (i == 0) ?
+				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+				     AMDGPU_RING_TYPE_SDMA);
+		if (r)
+			return r;
+	}
 
 	return r;
 }
@@ -1140,9 +1135,10 @@ static int sdma_v3_0_sw_init(void *handle)
 static int sdma_v3_0_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
 
-	amdgpu_ring_fini(&adev->sdma[0].ring);
-	amdgpu_ring_fini(&adev->sdma[1].ring);
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
 	return 0;
 }
@@ -1222,7 +1218,7 @@ static void sdma_v3_0_print_status(void *handle)
 	dev_info(adev->dev, "VI SDMA registers\n");
 	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
 		dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
@@ -1367,7 +1363,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
 	case 0:
 		switch (queue_id) {
 		case 0:
-			amdgpu_fence_process(&adev->sdma[0].ring);
+			amdgpu_fence_process(&adev->sdma.instance[0].ring);
 			break;
 		case 1:
 			/* XXX compute */
@@ -1380,7 +1376,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
 	case 1:
 		switch (queue_id) {
 		case 0:
-			amdgpu_fence_process(&adev->sdma[1].ring);
+			amdgpu_fence_process(&adev->sdma.instance[1].ring);
 			break;
 		case 1:
 			/* XXX compute */
@@ -1468,8 +1464,10 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs;
-	adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
@@ -1483,9 +1481,9 @@ static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
 
 static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-	adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
-	adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
+	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+	adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
+	adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1551,7 +1549,7 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
 	if (adev->mman.buffer_funcs == NULL) {
 		adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
-		adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 	}
 }
 
@@ -1566,7 +1564,7 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
-		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
 		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
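
For reference, a minimal sketch of the grouped SDMA state this diff converts to. The field
names mirror the accessors used in the hunks above (adev->sdma.instance[i],
adev->sdma.num_instances, adev->sdma.trap_irq, adev->sdma.illegal_inst_irq); the actual
definitions belong in amdgpu.h and may differ in ordering and in additional members.

/* Illustrative only -- reconstructed from the accessors used in this patch. */
struct amdgpu_sdma_instance {
	/* per-engine firmware state, filled in by sdma_v3_0_init_microcode() */
	const struct firmware	*fw;
	uint32_t		fw_version;
	uint32_t		feature_version;
	bool			burst_nop;	/* set when feature_version >= 20 */

	struct amdgpu_ring	ring;		/* the engine's GFX ring */
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[SDMA_MAX_INSTANCE];
	struct amdgpu_irq_src	trap_irq;
	struct amdgpu_irq_src	illegal_inst_irq;
	int			num_instances;	/* set in sdma_v3_0_early_init() */
};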