@@ -24,6 +24,18 @@
 #include "amdgpu.h"
 #define MAX_KIQ_REG_WAIT	100000000 /* in usecs */
 
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+	addr -= AMDGPU_VA_RESERVED_SIZE;
+
+	if (addr >= AMDGPU_VA_HOLE_START)
+		addr |= AMDGPU_VA_HOLE_END;
+
+	return addr;
+}
+
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
 	/* By now all MMIO pages except mailbox are blocked */
@@ -55,14 +67,14 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
 
 /*
  * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
- * to this VM, and each command submission of GFX should use this virtual
- * address within META_DATA init package to support SRIOV gfx preemption.
+ * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
+ * submission of GFX should use this virtual address within META_DATA init
+ * package to support SRIOV gfx preemption.
  */
-
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			  struct amdgpu_bo_va **bo_va)
 {
+	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	struct amdgpu_bo_list_entry pd;
@@ -90,7 +102,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return -ENOMEM;
 	}
 
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
 				AMDGPU_CSA_SIZE);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
@@ -99,7 +111,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return r;
 	}
 
-	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
 			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 			     AMDGPU_PTE_EXECUTABLE);
 