@@ -238,17 +238,17 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
-		if (adev->gart.pages[p]) {
-			adev->gart.pages[p] = NULL;
-			page_base = adev->dummy_page.addr;
-			if (!adev->gart.ptr)
-				continue;
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+		adev->gart.pages[p] = NULL;
+#endif
+		page_base = adev->dummy_page.addr;
+		if (!adev->gart.ptr)
+			continue;
 
-			for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-				amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
-							t, page_base, flags);
-				page_base += AMDGPU_GPU_PAGE_SIZE;
-			}
+		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
+						t, page_base, flags);
+			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
 	}
 	mb();
@@ -286,7 +286,9 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
 		adev->gart.pages[p] = pagelist[i];
+#endif
 		if (adev->gart.ptr) {
 			page_base = dma_addr[i];
 			for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
@@ -312,9 +314,9 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->gart.pages) {
+	if (adev->dummy_page.page)
 		return 0;
-	}
+
 	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
 	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
 		DRM_ERROR("Page size is smaller than GPU page size!\n");
@@ -328,12 +330,16 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
 	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
 		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
+
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
 	/* Allocate pages table */
 	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
 	if (adev->gart.pages == NULL) {
 		amdgpu_gart_fini(adev);
 		return -ENOMEM;
 	}
+#endif
+
 	return 0;
 }
 
@@ -346,13 +352,14 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
  */
 void amdgpu_gart_fini(struct amdgpu_device *adev)
 {
-	if (adev->gart.pages && adev->gart.ready) {
+	if (adev->gart.ready) {
 		/* unbind pages */
 		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
 	}
 	adev->gart.ready = false;
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
 	vfree(adev->gart.pages);
 	adev->gart.pages = NULL;
-
+#endif
 	amdgpu_dummy_page_fini(adev);
 }