@@ -104,6 +104,66 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
						u64 end,
						enum drm_mm_search_flags flags);
 
+#ifdef CONFIG_DRM_DEBUG_MM
+#define STACKDEPTH 32
+#define BUFSZ 4096
+
+static noinline void save_stack(struct drm_mm_node *node)
+{
+	unsigned long entries[STACKDEPTH];
+	struct stack_trace trace = {
+		.entries = entries,
+		.max_entries = STACKDEPTH,
+		.skip = 1
+	};
+
+	save_stack_trace(&trace);
+	if (trace.nr_entries != 0 &&
+	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	/* May be called under spinlock, so avoid sleeping */
+	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+}
+
+static void show_leaks(struct drm_mm *mm)
+{
+	struct drm_mm_node *node;
+	unsigned long entries[STACKDEPTH];
+	char *buf;
+
+	buf = kmalloc(BUFSZ, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	list_for_each_entry(node, &mm->head_node.node_list, node_list) {
+		struct stack_trace trace = {
+			.entries = entries,
+			.max_entries = STACKDEPTH
+		};
+
+		if (!node->stack) {
+			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
+				  node->start, node->size);
+			continue;
+		}
+
+		depot_fetch_stack(node->stack, &trace);
+		snprint_stack_trace(buf, BUFSZ, &trace, 0);
+		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
+			  node->start, node->size, buf);
+	}
+
+	kfree(buf);
+}
+
+#undef STACKDEPTH
+#undef BUFSZ
+#else
+static void save_stack(struct drm_mm_node *node) { }
+static void show_leaks(struct drm_mm *mm) { }
+#endif
+
 #define START(node) ((node)->start)
 #define LAST(node)  ((node)->start + (node)->size - 1)
 
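Note on companion changes that are not shown in this excerpt: save_stack() and show_leaks() above read and write node->stack, so the complete patch presumably also adds a stack-depot handle to struct drm_mm_node in include/drm/drm_mm.h, and CONFIG_DRM_DEBUG_MM would need a Kconfig entry that selects STACKDEPOT so that depot_save_stack()/depot_fetch_stack() are built in. A minimal sketch of what the header side might look like; the surrounding members and exact placement are assumptions, not the patch itself:

	#include <linux/stackdepot.h>	/* depot_stack_handle_t */

	struct drm_mm_node {
		/* ... existing members (list heads, start, size, color, ...) ... */
	#ifdef CONFIG_DRM_DEBUG_MM
		depot_stack_handle_t stack;	/* handle returned by depot_save_stack() */
	#endif
	};
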
@@ -228,6 +288,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
 	}
+
+	save_stack(node);
 }
 
 /**
@@ -293,6 +355,8 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		node->hole_follows = 1;
 	}
 
+	save_stack(node);
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
@@ -397,6 +461,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
 	}
+
+	save_stack(node);
 }
 
 /**
@@ -861,10 +927,12 @@ EXPORT_SYMBOL(drm_mm_init);
  * Note that it is a bug to call this function on an allocator which is not
  * clean.
  */
-void drm_mm_takedown(struct drm_mm * mm)
+void drm_mm_takedown(struct drm_mm *mm)
 {
-	WARN(!list_empty(&mm->head_node.node_list),
-	     "Memory manager not clean during takedown.\n");
+	if (WARN(!list_empty(&mm->head_node.node_list),
+		 "Memory manager not clean during takedown.\n"))
+		show_leaks(mm);
+
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
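Illustration only, not part of the patch: with CONFIG_DRM_DEBUG_MM enabled, a driver that tears down its range manager while a node is still allocated now gets the insertion call chain in the kernel log instead of only the bare WARN. A hedged sketch against the drm_mm API of this era; the names test_mm, leaked_node and example_leak are hypothetical:

	#include <drm/drm_mm.h>
	#include <linux/sizes.h>

	static struct drm_mm test_mm;		/* hypothetical */
	static struct drm_mm_node leaked_node;	/* hypothetical */

	static void example_leak(void)		/* hypothetical */
	{
		drm_mm_init(&test_mm, 0, SZ_4K);

		/* save_stack() records this call chain via the stack depot */
		if (drm_mm_insert_node(&test_mm, &leaked_node, 64, 0,
				       DRM_MM_SEARCH_DEFAULT))
			return;

		/* Bug being illustrated: drm_mm_remove_node(&leaked_node)
		 * is never called before takedown. */

		/* WARNs "Memory manager not clean during takedown." and
		 * show_leaks() then prints "node [... + ...]: inserted at"
		 * followed by the recorded stack trace. */
		drm_mm_takedown(&test_mm);
	}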