@@ -19,11 +19,13 @@ struct callchain_cpus_entries {
 };
 
 int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;
 
 static inline size_t perf_callchain_entry__sizeof(void)
 {
 	return (sizeof(struct perf_callchain_entry) +
-		sizeof(__u64) * sysctl_perf_event_max_stack);
+		sizeof(__u64) * (sysctl_perf_event_max_stack +
+				 sysctl_perf_event_max_contexts_per_stack));
 }
 
 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
@@ -32,12 +34,12 @@ static DEFINE_MUTEX(callchain_mutex);
 static struct callchain_cpus_entries *callchain_cpus_entries;
 
 
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 				  struct pt_regs *regs)
 {
 }
 
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 				struct pt_regs *regs)
 {
 }
@@ -176,14 +178,15 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	if (!kernel && !user)
 		return NULL;
 
-	return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+	return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
 }
 
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-		   bool crosstask, bool add_mark)
+		   u32 max_stack, bool crosstask, bool add_mark)
 {
 	struct perf_callchain_entry *entry;
+	struct perf_callchain_entry_ctx ctx;
 	int rctx;
 
 	entry = get_callchain_entry(&rctx);
@@ -193,12 +196,16 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 	if (!entry)
 		goto exit_put;
 
-	entry->nr = init_nr;
+	ctx.entry = entry;
+	ctx.max_stack = max_stack;
+	ctx.nr = entry->nr = init_nr;
+	ctx.contexts = 0;
+	ctx.contexts_maxed = false;
 
 	if (kernel && !user_mode(regs)) {
 		if (add_mark)
-			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(entry, regs);
+			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
+		perf_callchain_kernel(&ctx, regs);
 	}
 
 	if (user) {
@@ -214,8 +221,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 				goto exit_put;
 
 			if (add_mark)
-				perf_callchain_store(entry, PERF_CONTEXT_USER);
-			perf_callchain_user(entry, regs);
+				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
+			perf_callchain_user(&ctx, regs);
 		}
 	}
 
@@ -225,10 +232,15 @@ exit_put:
 	return entry;
 }
 
+/*
+ * Used for sysctl_perf_event_max_stack and
+ * sysctl_perf_event_max_contexts_per_stack.
+ */
 int perf_event_max_stack_handler(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int new_value = sysctl_perf_event_max_stack, ret;
+	int *value = table->data;
+	int new_value = *value, ret;
 	struct ctl_table new_table = *table;
 
 	new_table.data = &new_value;
@@ -240,7 +252,7 @@ int perf_event_max_stack_handler(struct ctl_table *table, int write,
 	if (atomic_read(&nr_callchain_events))
 		ret = -EBUSY;
 	else
-		sysctl_perf_event_max_stack = new_value;
+		*value = new_value;
 
 	mutex_unlock(&callchain_mutex);
 
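For reference, the split accounting this patch introduces can be modeled in a
few lines of userspace C. The sketch below is an illustration under stated
assumptions, not the kernel's actual helpers: the struct fields mirror
perf_callchain_entry_ctx and the two functions mimic the roles of
perf_callchain_store() and perf_callchain_store_context(), with real addresses
capped by max_stack and PERF_CONTEXT_* markers capped separately, which is why
perf_callchain_entry__sizeof() above now sizes the buffer for both.

/*
 * Illustrative model only, not kernel code: real addresses and
 * PERF_CONTEXT_* markers are accounted independently, matching the
 * new perf_callchain_entry__sizeof() buffer sizing above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_STACK		4	/* stand-in for sysctl_perf_event_max_stack */
#define MAX_CONTEXTS_PER_STACK	2	/* stand-in for sysctl_perf_event_max_contexts_per_stack */

struct entry_ctx {
	uint64_t	ip[MAX_STACK + MAX_CONTEXTS_PER_STACK];
	unsigned int	nr;		/* real addresses stored, like ctx.nr */
	unsigned int	contexts;	/* markers stored, like ctx.contexts */
	unsigned int	max_stack;	/* caps real addresses only */
	bool		contexts_maxed;
};

/* Store a real address: only these count against max_stack. */
static int store(struct entry_ctx *ctx, uint64_t ip)
{
	if (ctx->nr >= ctx->max_stack || ctx->contexts_maxed)
		return -1;
	ctx->ip[ctx->nr + ctx->contexts] = ip;
	ctx->nr++;
	return 0;
}

/* Store a context marker: capped by MAX_CONTEXTS_PER_STACK instead. */
static int store_context(struct entry_ctx *ctx, uint64_t marker)
{
	if (ctx->contexts >= MAX_CONTEXTS_PER_STACK) {
		ctx->contexts_maxed = true;
		return -1;
	}
	ctx->ip[ctx->nr + ctx->contexts] = marker;
	ctx->contexts++;
	return 0;
}

int main(void)
{
	struct entry_ctx ctx = { .max_stack = MAX_STACK };
	uint64_t ip;

	store_context(&ctx, (uint64_t)-128);	/* value of PERF_CONTEXT_KERNEL */
	for (ip = 0x1000; store(&ctx, ip) == 0; ip += 8)
		;				/* walk until the address cap hits */
	store_context(&ctx, (uint64_t)-512);	/* value of PERF_CONTEXT_USER */

	printf("addresses=%u contexts=%u total=%u\n",
	       ctx.nr, ctx.contexts, ctx.nr + ctx.contexts);
	return 0;
}

Running it prints addresses=4 contexts=2 total=6: both markers and all four
address slots fit, whereas under the old single cap the two markers would have
consumed two of the four entries charged against max_stack.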