|
@@ -2958,6 +2958,20 @@ u64 scheduler_tick_max_deferment(void)
|
|
|
|
|
|
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
|
|
|
defined(CONFIG_PREEMPT_TRACER))
|
|
|
+/*
|
|
|
+ * If the value passed in is equal to the current preempt count
|
|
|
+ * then we just disabled preemption. Start timing the latency.
|
|
|
+ */
|
|
|
+static inline void preempt_latency_start(int val)
|
|
|
+{
|
|
|
+ if (preempt_count() == val) {
|
|
|
+ unsigned long ip = get_lock_parent_ip();
|
|
|
+#ifdef CONFIG_DEBUG_PREEMPT
|
|
|
+ current->preempt_disable_ip = ip;
|
|
|
+#endif
|
|
|
+ trace_preempt_off(CALLER_ADDR0, ip);
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
void preempt_count_add(int val)
|
|
|
{
|
|
@@ -2976,17 +2990,21 @@ void preempt_count_add(int val)
|
|
|
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
|
|
|
PREEMPT_MASK - 10);
|
|
|
#endif
|
|
|
- if (preempt_count() == val) {
|
|
|
- unsigned long ip = get_lock_parent_ip();
|
|
|
-#ifdef CONFIG_DEBUG_PREEMPT
|
|
|
- current->preempt_disable_ip = ip;
|
|
|
-#endif
|
|
|
- trace_preempt_off(CALLER_ADDR0, ip);
|
|
|
- }
|
|
|
+ preempt_latency_start(val);
|
|
|
}
|
|
|
EXPORT_SYMBOL(preempt_count_add);
|
|
|
NOKPROBE_SYMBOL(preempt_count_add);
|
|
|
|
|
|
+/*
|
|
|
+ * If the value passed in is equal to the current preempt count
|
|
|
+ * then we just enabled preemption. Stop timing the latency.
|
|
|
+ */
|
|
|
+static inline void preempt_latency_stop(int val)
|
|
|
+{
|
|
|
+ if (preempt_count() == val)
|
|
|
+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
|
|
|
+}
|
|
|
+
|
|
|
void preempt_count_sub(int val)
|
|
|
{
|
|
|
#ifdef CONFIG_DEBUG_PREEMPT
|
|
@@ -3003,13 +3021,15 @@ void preempt_count_sub(int val)
|
|
|
return;
|
|
|
#endif
|
|
|
|
|
|
- if (preempt_count() == val)
|
|
|
- trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
|
|
|
+ preempt_latency_stop(val);
|
|
|
__preempt_count_sub(val);
|
|
|
}
|
|
|
EXPORT_SYMBOL(preempt_count_sub);
|
|
|
NOKPROBE_SYMBOL(preempt_count_sub);
|
|
|
|
|
|
+#else
|
|
|
+static inline void preempt_latency_start(int val) { }
|
|
|
+static inline void preempt_latency_stop(int val) { }
|
|
|
#endif
|
|
|
|
|
|
/*
|
|
@@ -3284,8 +3304,23 @@ void __sched schedule_preempt_disabled(void)
|
|
|
static void __sched notrace preempt_schedule_common(void)
|
|
|
{
|
|
|
do {
|
|
|
+ /*
|
|
|
+ * Because the function tracer can trace preempt_count_sub()
|
|
|
+ * and it also uses preempt_enable/disable_notrace(), if
|
|
|
+ * NEED_RESCHED is set, the preempt_enable_notrace() called
|
|
|
+ * by the function tracer will call this function again and
|
|
|
+ * cause infinite recursion.
|
|
|
+ *
|
|
|
+ * Preemption must be disabled here before the function
|
|
|
+ * tracer can trace. Break up preempt_disable() into two
|
|
|
+ * calls. One to disable preemption without fear of being
|
|
|
+ * traced. The other to still record the preemption latency,
|
|
|
+ * which can also be traced by the function tracer.
|
|
|
+ */
|
|
|
preempt_disable_notrace();
|
|
|
+ preempt_latency_start(1);
|
|
|
__schedule(true);
|
|
|
+ preempt_latency_stop(1);
|
|
|
preempt_enable_no_resched_notrace();
|
|
|
|
|
|
/*
|
|
@@ -3337,7 +3372,21 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
|
|
|
return;
|
|
|
|
|
|
do {
|
|
|
+ /*
|
|
|
+ * Because the function tracer can trace preempt_count_sub()
|
|
|
+ * and it also uses preempt_enable/disable_notrace(), if
|
|
|
+ * NEED_RESCHED is set, the preempt_enable_notrace() called
|
|
|
+ * by the function tracer will call this function again and
|
|
|
+ * cause infinite recursion.
|
|
|
+ *
|
|
|
+ * Preemption must be disabled here before the function
|
|
|
+ * tracer can trace. Break up preempt_disable() into two
|
|
|
+ * calls. One to disable preemption without fear of being
|
|
|
+ * traced. The other to still record the preemption latency,
|
|
|
+ * which can also be traced by the function tracer.
|
|
|
+ */
|
|
|
preempt_disable_notrace();
|
|
|
+ preempt_latency_start(1);
|
|
|
/*
|
|
|
* Needs preempt disabled in case user_exit() is traced
|
|
|
* and the tracer calls preempt_enable_notrace() causing
|
|
@@ -3347,6 +3396,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
|
|
|
__schedule(true);
|
|
|
exception_exit(prev_ctx);
|
|
|
|
|
|
+ preempt_latency_stop(1);
|
|
|
preempt_enable_no_resched_notrace();
|
|
|
} while (need_resched());
|
|
|
}
|