@@ -124,6 +124,28 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 	return usage;
 }
 
+/**
+ * amdgpu_vram_mgr_virt_start - update virtual start address
+ *
+ * @mem: ttm_mem_reg to update
+ * @node: just allocated node
+ *
+ * Calculate a virtual BO start address to easily check if everything is CPU
+ * accessible.
+ */
+static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
+				       struct drm_mm_node *node)
+{
+	unsigned long start;
+
+	start = node->start + node->size;
+	if (start > mem->num_pages)
+		start -= mem->num_pages;
+	else
+		start = 0;
+	mem->start = max(mem->start, start);
+}
+
 /**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
@@ -176,10 +198,25 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	pages_left = mem->num_pages;
 
 	spin_lock(&mgr->lock);
-	for (i = 0; i < num_nodes; ++i) {
+	for (i = 0; pages_left >= pages_per_node; ++i) {
+		unsigned long pages = rounddown_pow_of_two(pages_left);
+
+		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
+						pages_per_node, 0,
+						place->fpfn, lpfn,
+						mode);
+		if (unlikely(r))
+			break;
+
+		usage += nodes[i].size << PAGE_SHIFT;
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
+		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+		pages_left -= pages;
+	}
+
+	for (; pages_left; ++i) {
 		unsigned long pages = min(pages_left, pages_per_node);
 		uint32_t alignment = mem->page_alignment;
-		unsigned long start;
 
 		if (pages == pages_per_node)
 			alignment = pages_per_node;
@@ -193,16 +230,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 		usage += nodes[i].size << PAGE_SHIFT;
 		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
-
-		/* Calculate a virtual BO start address to easily check if
-		 * everything is CPU accessible.
-		 */
-		start = nodes[i].start + nodes[i].size;
-		if (start > mem->num_pages)
-			start -= mem->num_pages;
-		else
-			start = 0;
-		mem->start = max(mem->start, start);
+		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
 
 		pages_left -= pages;
 	}
 	spin_unlock(&mgr->lock);
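
For illustration only, not part of the patch: a minimal, self-contained userspace sketch of the two ideas above. struct node, fake_alloc(), PAGES_PER_NODE and the local rounddown_pow_of_two() are simplified stand-ins for the kernel's struct drm_mm_node, drm_mm_insert_node_in_range(), the driver's pages_per_node and the helper from linux/log2.h; none of them are the real APIs. The first loop takes the largest power-of-two chunk that still fits, the second mops up the remainder, and virt_start() tracks a virtual start address whose sum with num_pages bounds the end of every node, so a single comparison against the CPU-visible page count covers all nodes.

#include <stdio.h>

/* Stand-in for the kernel's struct drm_mm_node: only the fields used here. */
struct node {
	unsigned long start;
	unsigned long size;
};

#define PAGES_PER_NODE 512UL	/* stand-in for the driver's pages_per_node */

/* Userspace mirror of rounddown_pow_of_two() from linux/log2.h. */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
	while (n & (n - 1))
		n &= n - 1;	/* clear the lowest set bit until one remains */
	return n;
}

/* Same arithmetic as amdgpu_vram_mgr_virt_start(): fold the node's end back
 * by num_pages and keep the running maximum, so *virt + num_pages is an
 * upper bound on the end of every node seen so far. */
static void virt_start(unsigned long *virt, unsigned long num_pages,
		       const struct node *n)
{
	unsigned long start = n->start + n->size;

	start = start > num_pages ? start - num_pages : 0;
	if (start > *virt)
		*virt = start;
}

/* Bump allocator standing in for drm_mm_insert_node_in_range(); it hands
 * out consecutive page ranges and never fails. */
static unsigned long next_free = 4096;

static void fake_alloc(struct node *n, unsigned long pages)
{
	n->start = next_free;
	n->size = pages;
	next_free += pages;
}

int main(void)
{
	struct node nodes[8];
	unsigned long num_pages = 1300;	/* BO size in pages */
	unsigned long pages_left = num_pages, virt = 0;
	unsigned int i = 0;

	/* First pass, as in the patch: take the largest power-of-two chunk
	 * that still fits, so large BOs end up in few, large nodes. */
	for (; pages_left >= PAGES_PER_NODE; ++i) {
		unsigned long pages = rounddown_pow_of_two(pages_left);

		fake_alloc(&nodes[i], pages);
		virt_start(&virt, num_pages, &nodes[i]);
		pages_left -= pages;
	}

	/* Second pass: mop up the remainder in pages_per_node-sized pieces. */
	for (; pages_left; ++i) {
		unsigned long pages = pages_left < PAGES_PER_NODE ?
				      pages_left : PAGES_PER_NODE;

		fake_alloc(&nodes[i], pages);
		virt_start(&virt, num_pages, &nodes[i]);
		pages_left -= pages;
	}

	printf("%u nodes, virtual start %lu\n", i, virt);
	return 0;
}

Built with a plain cc invocation this prints "2 nodes, virtual start 4096": the 1300-page BO becomes one 1024-page node at page 4096 and one 276-page node at page 5120, and the virtual start is the second node's end (5396) minus num_pages, so virt + num_pages lands exactly on the highest page the BO touches.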