@@ -751,13 +751,13 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
-	struct hlist_node *tmp;
-	HLIST_HEAD(freelist);
 	struct debug_obj_descr *descr;
 	enum debug_obj_state state;
 	struct debug_bucket *db;
+	struct hlist_node *tmp;
 	struct debug_obj *obj;
 	int cnt, objs_checked = 0;
+	bool work = false;
 
 	saddr = (unsigned long) address;
 	eaddr = saddr + size;
@@ -788,18 +788,12 @@ repeat:
 				goto repeat;
 			default:
 				hlist_del(&obj->node);
-				hlist_add_head(&obj->node, &freelist);
+				work |= __free_object(obj);
 				break;
 			}
 		}
 		raw_spin_unlock_irqrestore(&db->lock, flags);
 
-		/* Now free them */
-		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
-			hlist_del(&obj->node);
-			free_object(obj);
-		}
-
 		if (cnt > debug_objects_maxchain)
 			debug_objects_maxchain = cnt;
 
@@ -808,6 +802,10 @@ repeat:
 
 	if (objs_checked > debug_objects_maxchecked)
 		debug_objects_maxchecked = objs_checked;
+
+	/* Schedule work to actually kmem_cache_free() objects */
+	if (work)
+		schedule_work(&debug_obj_work);
 }
 
 void debug_check_no_obj_freed(const void *address, unsigned long size)