@@ -39,15 +39,15 @@ void context_tracking_cpu_set(int cpu)
 }
 
 /**
- * context_tracking_user_enter - Inform the context tracking that the CPU is going to
- *                               enter userspace mode.
+ * context_tracking_enter - Inform the context tracking that the CPU is going
+ *                          enter user or guest space mode.
  *
  * This function must be called right before we switch from the kernel
- * to userspace, when it's guaranteed the remaining kernel instructions
- * to execute won't use any RCU read side critical section because this
- * function sets RCU in extended quiescent state.
+ * to user or guest space, when it's guaranteed the remaining kernel
+ * instructions to execute won't use any RCU read side critical section
+ * because this function sets RCU in extended quiescent state.
  */
-void context_tracking_user_enter(void)
+void context_tracking_enter(enum ctx_state state)
 {
 	unsigned long flags;
 
@@ -75,9 +75,8 @@ void context_tracking_user_enter(void)
 	WARN_ON_ONCE(!current->mm);
 
 	local_irq_save(flags);
-	if ( __this_cpu_read(context_tracking.state) != IN_USER) {
+	if ( __this_cpu_read(context_tracking.state) != state) {
 		if (__this_cpu_read(context_tracking.active)) {
-			trace_user_enter(0);
 			/*
 			 * At this stage, only low level arch entry code remains and
 			 * then we'll run in userspace. We can assume there won't be
@@ -85,7 +84,10 @@ void context_tracking_user_enter(void)
 			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
 			 * on the tick.
 			 */
-			vtime_user_enter(current);
+			if (state == CONTEXT_USER) {
+				trace_user_enter(0);
+				vtime_user_enter(current);
+			}
 			rcu_user_enter();
 		}
 		/*
@@ -101,24 +103,32 @@ void context_tracking_user_enter(void)
 		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
 		 * is false because we know that CPU is not tickless.
 		 */
-		__this_cpu_write(context_tracking.state, IN_USER);
+		__this_cpu_write(context_tracking.state, state);
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_enter);
+EXPORT_SYMBOL_GPL(context_tracking_enter);
+
+void context_tracking_user_enter(void)
+{
+	context_tracking_enter(CONTEXT_USER);
+}
 NOKPROBE_SYMBOL(context_tracking_user_enter);
 
 /**
- * context_tracking_user_exit - Inform the context tracking that the CPU is
- *                              exiting userspace mode and entering the kernel.
+ * context_tracking_exit - Inform the context tracking that the CPU is
+ *                         exiting user or guest mode and entering the kernel.
  *
- * This function must be called after we entered the kernel from userspace
- * before any use of RCU read side critical section. This potentially include
- * any high level kernel code like syscalls, exceptions, signal handling, etc...
+ * This function must be called after we entered the kernel from user or
+ * guest space before any use of RCU read side critical section. This
+ * potentially include any high level kernel code like syscalls, exceptions,
+ * signal handling, etc...
  *
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void context_tracking_user_exit(void)
+void context_tracking_exit(enum ctx_state state)
 {
 	unsigned long flags;
 
@@ -129,20 +139,29 @@ void context_tracking_user_exit(void)
 		return;
 
 	local_irq_save(flags);
-	if (__this_cpu_read(context_tracking.state) == IN_USER) {
+	if (__this_cpu_read(context_tracking.state) == state) {
 		if (__this_cpu_read(context_tracking.active)) {
 			/*
 			 * We are going to run code that may use RCU. Inform
 			 * RCU core about that (ie: we may need the tick again).
 			 */
 			rcu_user_exit();
-			vtime_user_exit(current);
-			trace_user_exit(0);
+			if (state == CONTEXT_USER) {
+				vtime_user_exit(current);
+				trace_user_exit(0);
+			}
 		}
-		__this_cpu_write(context_tracking.state, IN_KERNEL);
+		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_exit);
+EXPORT_SYMBOL_GPL(context_tracking_exit);
+
+void context_tracking_user_exit(void)
+{
+	context_tracking_exit(CONTEXT_USER);
+}
 NOKPROBE_SYMBOL(context_tracking_user_exit);
 
 /**
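
For illustration, here is how a hypervisor entry path could use the
generalized API. This is a minimal sketch, assuming the series also adds a
CONTEXT_GUEST value to enum ctx_state (not visible in these hunks); the
vcpu_enter_guest_mode() helper is hypothetical:

	#include <linux/context_tracking.h>

	static void vcpu_enter_guest_mode(void)
	{
		/*
		 * Puts RCU in an extended quiescent state, but skips
		 * trace_user_enter()/vtime_user_enter() because
		 * state != CONTEXT_USER inside context_tracking_enter().
		 */
		context_tracking_enter(CONTEXT_GUEST);

		/* ... run the guest ... */

		/*
		 * Symmetric teardown: leaves the quiescent state and
		 * writes CONTEXT_KERNEL back, again without the
		 * userspace-only vtime/trace hooks.
		 */
		context_tracking_exit(CONTEXT_GUEST);
	}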