@@ -3510,6 +3510,18 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
+/*
+ * In order to reduce various lock holder preemption latencies provide an
+ * interface to see if a vCPU is currently running or not.
+ *
+ * This allows us to terminate optimistic spin loops and block, analogous to
+ * the native optimistic spin heuristic of testing if the lock owner task is
+ * running or not.
+ */
+#ifndef vcpu_is_preempted
+# define vcpu_is_preempted(cpu) false
+#endif
+
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 