@@ -96,7 +96,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
/*
* Some helpers for converting nanosecond timing to jiffy resolution
*/
-#define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ))
+#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (1000000000 / HZ))
#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))

#define NICE_0_LOAD SCHED_LOAD_SCALE
@@ -105,11 +105,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
/*
* These are the 'tuning knobs' of the scheduler:
*
- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
* Timeslices get refilled after they expire.
*/
-#define MIN_TIMESLICE max(5 * HZ / 1000, 1)
#define DEF_TIMESLICE (100 * HZ / 1000)

#ifdef CONFIG_SMP
@@ -133,24 +131,6 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
}
#endif

-#define SCALE_PRIO(x, prio) \
- max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
-
-/*
- * static_prio_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- */
-static unsigned int static_prio_timeslice(int static_prio)
-{
- if (static_prio == NICE_TO_PRIO(19))
- return 1;
-
- if (static_prio < NICE_TO_PRIO(0))
- return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
- else
- return SCALE_PRIO(DEF_TIMESLICE, static_prio);
-}
-
static inline int rt_policy(int policy)
{
if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
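
The hunks above drop the fixed per-priority timeslice machinery (MIN_TIMESLICE, SCALE_PRIO, static_prio_timeslice): under CFS only SCHED_RR still consumes a fixed quantum, and that is simply DEF_TIMESLICE. A minimal userspace sketch, not part of the patch and with HZ hard-coded to 1000 as an assumption, of what the surviving conversion macros evaluate to:

#include <stdio.h>

#define HZ 1000                                   /* assumed config value */
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (1000000000 / HZ))
#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
#define DEF_TIMESLICE (100 * HZ / 1000)           /* 100 msecs, in jiffies */

int main(void)
{
	/* The new cast to unsigned long keeps the division a native
	 * word-sized divide even when TIME is a 64-bit nanosecond count. */
	printf("5 ms = %lu jiffies\n", NS_TO_JIFFIES(5000000ULL));
	printf("RR slice = %d jiffies (%d ns)\n",
	       DEF_TIMESLICE, JIFFIES_TO_NS(DEF_TIMESLICE));
	return 0;
}
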
@@ -171,31 +151,91 @@ struct rt_prio_array {
|
|
|
struct list_head queue[MAX_RT_PRIO];
|
|
|
};
|
|
|
|
|
|
-struct load_stat {
|
|
|
- struct load_weight load;
|
|
|
- u64 load_update_start, load_update_last;
|
|
|
- unsigned long delta_fair, delta_exec, delta_stat;
|
|
|
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
|
+
|
|
|
+struct cfs_rq;
|
|
|
+
|
|
|
+/* task group related information */
|
|
|
+struct task_group {
|
|
|
+ /* schedulable entities of this group on each cpu */
|
|
|
+ struct sched_entity **se;
|
|
|
+ /* runqueue "owned" by this group on each cpu */
|
|
|
+ struct cfs_rq **cfs_rq;
|
|
|
+ unsigned long shares;
|
|
|
+ /* spinlock to serialize modification to shares */
|
|
|
+ spinlock_t lock;
|
|
|
+};
|
|
|
+
|
|
|
+/* Default task group's sched entity on each cpu */
|
|
|
+static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
|
|
|
+/* Default task group's cfs_rq on each cpu */
|
|
|
+static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
|
|
|
+
|
|
|
+static struct sched_entity *init_sched_entity_p[NR_CPUS];
|
|
|
+static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
|
|
|
+
|
|
|
+/* Default task group.
|
|
|
+ * Every task in the system belongs to this group at bootup.
|
|
|
+ */
|
|
|
+struct task_group init_task_group = {
|
|
|
+ .se = init_sched_entity_p,
|
|
|
+ .cfs_rq = init_cfs_rq_p,
|
|
|
};
|
|
|
|
|
|
+#ifdef CONFIG_FAIR_USER_SCHED
|
|
|
+# define INIT_TASK_GRP_LOAD 2*NICE_0_LOAD
|
|
|
+#else
|
|
|
+# define INIT_TASK_GRP_LOAD NICE_0_LOAD
|
|
|
+#endif
|
|
|
+
|
|
|
+static int init_task_group_load = INIT_TASK_GRP_LOAD;
|
|
|
+
|
|
|
+/* return group to which a task belongs */
|
|
|
+static inline struct task_group *task_group(struct task_struct *p)
|
|
|
+{
|
|
|
+ struct task_group *tg;
|
|
|
+
|
|
|
+#ifdef CONFIG_FAIR_USER_SCHED
|
|
|
+ tg = p->user->tg;
|
|
|
+#else
|
|
|
+ tg = &init_task_group;
|
|
|
+#endif
|
|
|
+
|
|
|
+ return tg;
|
|
|
+}
|
|
|
+
|
|
|
+/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
|
|
|
+static inline void set_task_cfs_rq(struct task_struct *p)
|
|
|
+{
|
|
|
+ p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
|
|
|
+ p->se.parent = task_group(p)->se[task_cpu(p)];
|
|
|
+}
|
|
|
+
|
|
|
+#else
|
|
|
+
|
|
|
+static inline void set_task_cfs_rq(struct task_struct *p) { }
|
|
|
+
|
|
|
+#endif /* CONFIG_FAIR_GROUP_SCHED */
|
|
|
+
|
|
|
/* CFS-related fields in a runqueue */
|
|
|
struct cfs_rq {
|
|
|
struct load_weight load;
|
|
|
unsigned long nr_running;
|
|
|
|
|
|
- s64 fair_clock;
|
|
|
u64 exec_clock;
|
|
|
- s64 wait_runtime;
|
|
|
- u64 sleeper_bonus;
|
|
|
- unsigned long wait_runtime_overruns, wait_runtime_underruns;
|
|
|
+ u64 min_vruntime;
|
|
|
|
|
|
struct rb_root tasks_timeline;
|
|
|
struct rb_node *rb_leftmost;
|
|
|
struct rb_node *rb_load_balance_curr;
|
|
|
-#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
|
/* 'curr' points to currently running entity on this cfs_rq.
|
|
|
* It is set to NULL otherwise (i.e when none are currently running).
|
|
|
*/
|
|
|
struct sched_entity *curr;
|
|
|
+
|
|
|
+ unsigned long nr_spread_over;
|
|
|
+
|
|
|
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
|
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
|
|
|
|
|
|
/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
|
|
@@ -206,6 +246,8 @@ struct cfs_rq {
|
|
|
* list is used during load balance.
|
|
|
*/
|
|
|
struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
|
|
|
+ struct task_group *tg; /* group that "owns" this runqueue */
|
|
|
+ struct rcu_head rcu;
|
|
|
#endif
|
|
|
};
|
|
|
|
|
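
With CONFIG_FAIR_GROUP_SCHED, each task group now owns one sched_entity and one cfs_rq per CPU, and set_task_cfs_rq() re-points a task's se.cfs_rq and se.parent at task_group(p)->cfs_rq[task_cpu(p)] and ->se[task_cpu(p)]. A stand-alone sketch of that per-group, per-CPU indexing; the simplified types, array sizes and two-group setup are illustrative assumptions, not the kernel's structures:

#include <stdio.h>

#define NR_CPUS 4

struct cfs_rq { int cpu; int group_id; };

/* Simplified task group: one runqueue pointer per CPU, as in the patch. */
struct task_group {
	struct cfs_rq *cfs_rq[NR_CPUS];
};

struct task { struct task_group *tg; int cpu; struct cfs_rq *my_cfs_rq; };

/* Mirror of set_task_cfs_rq(): pick the group's runqueue on the task's CPU. */
static void set_task_cfs_rq(struct task *p)
{
	p->my_cfs_rq = p->tg->cfs_rq[p->cpu];
}

int main(void)
{
	static struct cfs_rq rqs[2][NR_CPUS];
	static struct task_group groups[2];
	struct task p = { &groups[1], 2, NULL };

	for (int g = 0; g < 2; g++)
		for (int c = 0; c < NR_CPUS; c++) {
			rqs[g][c] = (struct cfs_rq){ .cpu = c, .group_id = g };
			groups[g].cfs_rq[c] = &rqs[g][c];
		}

	set_task_cfs_rq(&p);
	printf("task uses group %d's cfs_rq for cpu %d\n",
	       p.my_cfs_rq->group_id, p.my_cfs_rq->cpu);
	return 0;
}
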
@@ -237,7 +279,7 @@ struct rq {
|
|
|
#ifdef CONFIG_NO_HZ
|
|
|
unsigned char in_nohz_recently;
|
|
|
#endif
|
|
|
- struct load_stat ls; /* capture load from *all* tasks on this cpu */
|
|
|
+ struct load_weight load; /* capture load from *all* tasks on this cpu */
|
|
|
unsigned long nr_load_updates;
|
|
|
u64 nr_switches;
|
|
|
|
|
@@ -289,16 +331,19 @@ struct rq {
|
|
|
unsigned long yld_exp_empty;
|
|
|
unsigned long yld_act_empty;
|
|
|
unsigned long yld_both_empty;
|
|
|
- unsigned long yld_cnt;
|
|
|
+ unsigned long yld_count;
|
|
|
|
|
|
/* schedule() stats */
|
|
|
unsigned long sched_switch;
|
|
|
- unsigned long sched_cnt;
|
|
|
+ unsigned long sched_count;
|
|
|
unsigned long sched_goidle;
|
|
|
|
|
|
/* try_to_wake_up() stats */
|
|
|
- unsigned long ttwu_cnt;
|
|
|
+ unsigned long ttwu_count;
|
|
|
unsigned long ttwu_local;
|
|
|
+
|
|
|
+ /* BKL stats */
|
|
|
+ unsigned long bkl_count;
|
|
|
#endif
|
|
|
struct lock_class_key rq_lock_key;
|
|
|
};
|
|
@@ -382,6 +427,37 @@ static void update_rq_clock(struct rq *rq)
|
|
|
#define task_rq(p) cpu_rq(task_cpu(p))
|
|
|
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
|
|
|
|
|
|
+/*
|
|
|
+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
|
|
|
+ */
|
|
|
+#ifdef CONFIG_SCHED_DEBUG
|
|
|
+# define const_debug __read_mostly
|
|
|
+#else
|
|
|
+# define const_debug static const
|
|
|
+#endif
|
|
|
+
|
|
|
+/*
|
|
|
+ * Debugging: various feature bits
|
|
|
+ */
|
|
|
+enum {
|
|
|
+ SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
|
|
|
+ SCHED_FEAT_START_DEBIT = 2,
|
|
|
+ SCHED_FEAT_TREE_AVG = 4,
|
|
|
+ SCHED_FEAT_APPROX_AVG = 8,
|
|
|
+ SCHED_FEAT_WAKEUP_PREEMPT = 16,
|
|
|
+ SCHED_FEAT_PREEMPT_RESTRICT = 32,
|
|
|
+};
|
|
|
+
|
|
|
+const_debug unsigned int sysctl_sched_features =
|
|
|
+ SCHED_FEAT_NEW_FAIR_SLEEPERS *1 |
|
|
|
+ SCHED_FEAT_START_DEBIT *1 |
|
|
|
+ SCHED_FEAT_TREE_AVG *0 |
|
|
|
+ SCHED_FEAT_APPROX_AVG *0 |
|
|
|
+ SCHED_FEAT_WAKEUP_PREEMPT *1 |
|
|
|
+ SCHED_FEAT_PREEMPT_RESTRICT *1;
|
|
|
+
|
|
|
+#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
|
|
|
+
|
|
|
/*
|
|
|
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
|
|
|
* clock constructed from sched_clock():
|
|
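
The new SCHED_FEAT_* values are plain power-of-two bits; the default mask is built by multiplying each bit by 0 or 1, so a feature's default can be flipped in place, and with CONFIG_SCHED_DEBUG off const_debug makes the mask a compile-time constant so disabled-feature code can be optimized away. A small stand-alone sketch of the same idiom, with a subset of the feature names reused for illustration only:

#include <stdio.h>

enum {
	SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
	SCHED_FEAT_START_DEBIT       = 2,
	SCHED_FEAT_TREE_AVG          = 4,
	SCHED_FEAT_APPROX_AVG        = 8,
};

/* Multiplying by 1 keeps a bit in the default mask, by 0 drops it. */
static const unsigned int sysctl_sched_features =
	SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
	SCHED_FEAT_START_DEBIT       * 1 |
	SCHED_FEAT_TREE_AVG          * 0 |
	SCHED_FEAT_APPROX_AVG        * 0;

#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
	printf("mask=0x%x start_debit=%u tree_avg=%u\n",
	       sysctl_sched_features,
	       !!sched_feat(START_DEBIT), !!sched_feat(TREE_AVG));
	return 0;
}
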
@@ -400,18 +476,7 @@ unsigned long long cpu_clock(int cpu)
|
|
|
|
|
|
return now;
|
|
|
}
|
|
|
-
|
|
|
-#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
|
-/* Change a task's ->cfs_rq if it moves across CPUs */
|
|
|
-static inline void set_task_cfs_rq(struct task_struct *p)
|
|
|
-{
|
|
|
- p->se.cfs_rq = &task_rq(p)->cfs;
|
|
|
-}
|
|
|
-#else
|
|
|
-static inline void set_task_cfs_rq(struct task_struct *p)
|
|
|
-{
|
|
|
-}
|
|
|
-#endif
|
|
|
+EXPORT_SYMBOL_GPL(cpu_clock);
|
|
|
|
|
|
#ifndef prepare_arch_switch
|
|
|
# define prepare_arch_switch(next) do { } while (0)
|
|
@@ -497,16 +562,13 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
|
|
|
static inline struct rq *__task_rq_lock(struct task_struct *p)
|
|
|
__acquires(rq->lock)
|
|
|
{
|
|
|
- struct rq *rq;
|
|
|
-
|
|
|
-repeat_lock_task:
|
|
|
- rq = task_rq(p);
|
|
|
- spin_lock(&rq->lock);
|
|
|
- if (unlikely(rq != task_rq(p))) {
|
|
|
+ for (;;) {
|
|
|
+ struct rq *rq = task_rq(p);
|
|
|
+ spin_lock(&rq->lock);
|
|
|
+ if (likely(rq == task_rq(p)))
|
|
|
+ return rq;
|
|
|
spin_unlock(&rq->lock);
|
|
|
- goto repeat_lock_task;
|
|
|
}
|
|
|
- return rq;
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -519,18 +581,17 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
|
|
|
{
|
|
|
struct rq *rq;
|
|
|
|
|
|
-repeat_lock_task:
|
|
|
- local_irq_save(*flags);
|
|
|
- rq = task_rq(p);
|
|
|
- spin_lock(&rq->lock);
|
|
|
- if (unlikely(rq != task_rq(p))) {
|
|
|
+ for (;;) {
|
|
|
+ local_irq_save(*flags);
|
|
|
+ rq = task_rq(p);
|
|
|
+ spin_lock(&rq->lock);
|
|
|
+ if (likely(rq == task_rq(p)))
|
|
|
+ return rq;
|
|
|
spin_unlock_irqrestore(&rq->lock, *flags);
|
|
|
- goto repeat_lock_task;
|
|
|
}
|
|
|
- return rq;
|
|
|
}
|
|
|
|
|
|
-static inline void __task_rq_unlock(struct rq *rq)
|
|
|
+static void __task_rq_unlock(struct rq *rq)
|
|
|
__releases(rq->lock)
|
|
|
{
|
|
|
spin_unlock(&rq->lock);
|
|
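
Both lockers above are rewritten from goto-based retry to a for (;;) loop, but the logic is unchanged: read task_rq(p), take that runqueue's lock, and only return if the task still belongs to that runqueue, because a migration can move it between the read and the lock acquisition. A single-threaded toy version of the same check-after-lock pattern, with hypothetical types and a pthread mutex standing in for the runqueue spinlock:

#include <pthread.h>
#include <stdio.h>

struct rq { pthread_mutex_t lock; };
struct task { struct rq *rq; };           /* which runqueue the task is on */

static struct rq *task_rq_lock(struct task *p)
{
	for (;;) {
		struct rq *rq = p->rq;                /* racy read */
		pthread_mutex_lock(&rq->lock);
		if (rq == p->rq)                      /* still the right rq? */
			return rq;                    /* yes: return it locked */
		pthread_mutex_unlock(&rq->lock);      /* no: it moved, retry */
	}
}

int main(void)
{
	struct rq rq0 = { PTHREAD_MUTEX_INITIALIZER };
	struct task p = { &rq0 };
	struct rq *locked = task_rq_lock(&p);

	printf("locked the task's runqueue: %s\n", locked == &rq0 ? "yes" : "no");
	pthread_mutex_unlock(&locked->lock);
	return 0;
}
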
@@ -545,7 +606,7 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
|
|
|
/*
|
|
|
* this_rq_lock - lock this runqueue and disable interrupts.
|
|
|
*/
|
|
|
-static inline struct rq *this_rq_lock(void)
|
|
|
+static struct rq *this_rq_lock(void)
|
|
|
__acquires(rq->lock)
|
|
|
{
|
|
|
struct rq *rq;
|
|
@@ -645,19 +706,6 @@ static inline void resched_task(struct task_struct *p)
|
|
|
}
|
|
|
#endif
|
|
|
|
|
|
-static u64 div64_likely32(u64 divident, unsigned long divisor)
|
|
|
-{
|
|
|
-#if BITS_PER_LONG == 32
|
|
|
- if (likely(divident <= 0xffffffffULL))
|
|
|
- return (u32)divident / divisor;
|
|
|
- do_div(divident, divisor);
|
|
|
-
|
|
|
- return divident;
|
|
|
-#else
|
|
|
- return divident / divisor;
|
|
|
-#endif
|
|
|
-}
|
|
|
-
|
|
|
#if BITS_PER_LONG == 32
|
|
|
# define WMULT_CONST (~0UL)
|
|
|
#else
|
|
@@ -699,16 +747,14 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
|
|
|
return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
|
|
|
}
|
|
|
|
|
|
-static void update_load_add(struct load_weight *lw, unsigned long inc)
|
|
|
+static inline void update_load_add(struct load_weight *lw, unsigned long inc)
|
|
|
{
|
|
|
lw->weight += inc;
|
|
|
- lw->inv_weight = 0;
|
|
|
}
|
|
|
|
|
|
-static void update_load_sub(struct load_weight *lw, unsigned long dec)
|
|
|
+static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
|
|
|
{
|
|
|
lw->weight -= dec;
|
|
|
- lw->inv_weight = 0;
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -784,29 +830,20 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
|
|
int *this_best_prio, struct rq_iterator *iterator);
|
|
|
|
|
|
#include "sched_stats.h"
|
|
|
-#include "sched_rt.c"
|
|
|
-#include "sched_fair.c"
|
|
|
#include "sched_idletask.c"
|
|
|
+#include "sched_fair.c"
|
|
|
+#include "sched_rt.c"
|
|
|
#ifdef CONFIG_SCHED_DEBUG
|
|
|
# include "sched_debug.c"
|
|
|
#endif
|
|
|
|
|
|
#define sched_class_highest (&rt_sched_class)
|
|
|
|
|
|
-static void __update_curr_load(struct rq *rq, struct load_stat *ls)
|
|
|
-{
|
|
|
- if (rq->curr != rq->idle && ls->load.weight) {
|
|
|
- ls->delta_exec += ls->delta_stat;
|
|
|
- ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
|
|
|
- ls->delta_stat = 0;
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* Update delta_exec, delta_fair fields for rq.
|
|
|
*
|
|
|
* delta_fair clock advances at a rate inversely proportional to
|
|
|
- * total load (rq->ls.load.weight) on the runqueue, while
|
|
|
+ * total load (rq->load.weight) on the runqueue, while
|
|
|
* delta_exec advances at the same rate as wall-clock (provided
|
|
|
* cpu is not idle).
|
|
|
*
|
|
@@ -814,35 +851,17 @@ static void __update_curr_load(struct rq *rq, struct load_stat *ls)
|
|
|
* runqueue over any given interval. This (smoothened) load is used
|
|
|
* during load balance.
|
|
|
*
|
|
|
- * This function is called /before/ updating rq->ls.load
|
|
|
+ * This function is called /before/ updating rq->load
|
|
|
* and when switching tasks.
|
|
|
*/
|
|
|
-static void update_curr_load(struct rq *rq)
|
|
|
-{
|
|
|
- struct load_stat *ls = &rq->ls;
|
|
|
- u64 start;
|
|
|
-
|
|
|
- start = ls->load_update_start;
|
|
|
- ls->load_update_start = rq->clock;
|
|
|
- ls->delta_stat += rq->clock - start;
|
|
|
- /*
|
|
|
- * Stagger updates to ls->delta_fair. Very frequent updates
|
|
|
- * can be expensive.
|
|
|
- */
|
|
|
- if (ls->delta_stat >= sysctl_sched_stat_granularity)
|
|
|
- __update_curr_load(rq, ls);
|
|
|
-}
|
|
|
-
|
|
|
static inline void inc_load(struct rq *rq, const struct task_struct *p)
|
|
|
{
|
|
|
- update_curr_load(rq);
|
|
|
- update_load_add(&rq->ls.load, p->se.load.weight);
|
|
|
+ update_load_add(&rq->load, p->se.load.weight);
|
|
|
}
|
|
|
|
|
|
static inline void dec_load(struct rq *rq, const struct task_struct *p)
|
|
|
{
|
|
|
- update_curr_load(rq);
|
|
|
- update_load_sub(&rq->ls.load, p->se.load.weight);
|
|
|
+ update_load_sub(&rq->load, p->se.load.weight);
|
|
|
}
|
|
|
|
|
|
static void inc_nr_running(struct task_struct *p, struct rq *rq)
|
|
@@ -859,8 +878,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
|
|
|
|
|
|
static void set_load_weight(struct task_struct *p)
|
|
|
{
|
|
|
- p->se.wait_runtime = 0;
|
|
|
-
|
|
|
if (task_has_rt_policy(p)) {
|
|
|
p->se.load.weight = prio_to_weight[0] * 2;
|
|
|
p->se.load.inv_weight = prio_to_wmult[0] >> 1;
|
|
@@ -951,20 +968,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
|
|
|
inc_nr_running(p, rq);
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * activate_idle_task - move idle task to the _front_ of runqueue.
|
|
|
- */
|
|
|
-static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
|
|
|
-{
|
|
|
- update_rq_clock(rq);
|
|
|
-
|
|
|
- if (p->state == TASK_UNINTERRUPTIBLE)
|
|
|
- rq->nr_uninterruptible--;
|
|
|
-
|
|
|
- enqueue_task(rq, p, 0);
|
|
|
- inc_nr_running(p, rq);
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* deactivate_task - remove a task from the runqueue.
|
|
|
*/
|
|
@@ -989,32 +992,50 @@ inline int task_curr(const struct task_struct *p)
|
|
|
/* Used instead of source_load when we know the type == 0 */
|
|
|
unsigned long weighted_cpuload(const int cpu)
|
|
|
{
|
|
|
- return cpu_rq(cpu)->ls.load.weight;
|
|
|
+ return cpu_rq(cpu)->load.weight;
|
|
|
}
|
|
|
|
|
|
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
|
|
|
{
|
|
|
#ifdef CONFIG_SMP
|
|
|
task_thread_info(p)->cpu = cpu;
|
|
|
- set_task_cfs_rq(p);
|
|
|
#endif
|
|
|
+ set_task_cfs_rq(p);
|
|
|
}
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
|
|
+/*
|
|
|
+ * Is this task likely cache-hot:
|
|
|
+ */
|
|
|
+static inline int
|
|
|
+task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
|
|
|
+{
|
|
|
+ s64 delta;
|
|
|
+
|
|
|
+ if (p->sched_class != &fair_sched_class)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ if (sysctl_sched_migration_cost == -1)
|
|
|
+ return 1;
|
|
|
+ if (sysctl_sched_migration_cost == 0)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ delta = now - p->se.exec_start;
|
|
|
+
|
|
|
+ return delta < (s64)sysctl_sched_migration_cost;
|
|
|
+}
|
|
|
+
|
|
|
+
|
|
|
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
|
|
|
{
|
|
|
int old_cpu = task_cpu(p);
|
|
|
struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
|
|
|
- u64 clock_offset, fair_clock_offset;
|
|
|
+ struct cfs_rq *old_cfsrq = task_cfs_rq(p),
|
|
|
+ *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
|
|
|
+ u64 clock_offset;
|
|
|
|
|
|
clock_offset = old_rq->clock - new_rq->clock;
|
|
|
- fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
|
|
|
-
|
|
|
- if (p->se.wait_start_fair)
|
|
|
- p->se.wait_start_fair -= fair_clock_offset;
|
|
|
- if (p->se.sleep_start_fair)
|
|
|
- p->se.sleep_start_fair -= fair_clock_offset;
|
|
|
|
|
|
#ifdef CONFIG_SCHEDSTATS
|
|
|
if (p->se.wait_start)
|
|
@@ -1023,7 +1044,14 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
|
|
|
p->se.sleep_start -= clock_offset;
|
|
|
if (p->se.block_start)
|
|
|
p->se.block_start -= clock_offset;
|
|
|
+ if (old_cpu != new_cpu) {
|
|
|
+ schedstat_inc(p, se.nr_migrations);
|
|
|
+ if (task_hot(p, old_rq->clock, NULL))
|
|
|
+ schedstat_inc(p, se.nr_forced2_migrations);
|
|
|
+ }
|
|
|
#endif
|
|
|
+ p->se.vruntime -= old_cfsrq->min_vruntime -
|
|
|
+ new_cfsrq->min_vruntime;
|
|
|
|
|
|
__set_task_cpu(p, new_cpu);
|
|
|
}
|
|
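
set_task_cpu() now re-bases the task's vruntime when it changes runqueues: each cfs_rq has its own min_vruntime origin, so subtracting (old min_vruntime - new min_vruntime) keeps the task's relative position when it is requeued in the new tree. A tiny numeric sketch of that adjustment, with values invented purely for illustration:

#include <stdio.h>

int main(void)
{
	/* Invented example values, in nanoseconds of virtual runtime. */
	unsigned long long vruntime = 1050000;   /* task, on the old queue   */
	unsigned long long old_min  = 1000000;   /* old cfs_rq->min_vruntime */
	unsigned long long new_min  =  400000;   /* new cfs_rq->min_vruntime */

	/* The patch's adjustment in set_task_cpu(): */
	vruntime -= old_min - new_min;

	/* The task keeps its 50,000 ns lead over min_vruntime on the new queue. */
	printf("new vruntime = %llu (lead over new queue: %llu)\n",
	       vruntime, vruntime - new_min);
	return 0;
}
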
@@ -1078,69 +1106,71 @@ void wait_task_inactive(struct task_struct *p)
|
|
|
int running, on_rq;
|
|
|
struct rq *rq;
|
|
|
|
|
|
-repeat:
|
|
|
- /*
|
|
|
- * We do the initial early heuristics without holding
|
|
|
- * any task-queue locks at all. We'll only try to get
|
|
|
- * the runqueue lock when things look like they will
|
|
|
- * work out!
|
|
|
- */
|
|
|
- rq = task_rq(p);
|
|
|
+ for (;;) {
|
|
|
+ /*
|
|
|
+ * We do the initial early heuristics without holding
|
|
|
+ * any task-queue locks at all. We'll only try to get
|
|
|
+ * the runqueue lock when things look like they will
|
|
|
+ * work out!
|
|
|
+ */
|
|
|
+ rq = task_rq(p);
|
|
|
|
|
|
- /*
|
|
|
- * If the task is actively running on another CPU
|
|
|
- * still, just relax and busy-wait without holding
|
|
|
- * any locks.
|
|
|
- *
|
|
|
- * NOTE! Since we don't hold any locks, it's not
|
|
|
- * even sure that "rq" stays as the right runqueue!
|
|
|
- * But we don't care, since "task_running()" will
|
|
|
- * return false if the runqueue has changed and p
|
|
|
- * is actually now running somewhere else!
|
|
|
- */
|
|
|
- while (task_running(rq, p))
|
|
|
- cpu_relax();
|
|
|
+ /*
|
|
|
+ * If the task is actively running on another CPU
|
|
|
+ * still, just relax and busy-wait without holding
|
|
|
+ * any locks.
|
|
|
+ *
|
|
|
+ * NOTE! Since we don't hold any locks, it's not
|
|
|
+ * even sure that "rq" stays as the right runqueue!
|
|
|
+ * But we don't care, since "task_running()" will
|
|
|
+ * return false if the runqueue has changed and p
|
|
|
+ * is actually now running somewhere else!
|
|
|
+ */
|
|
|
+ while (task_running(rq, p))
|
|
|
+ cpu_relax();
|
|
|
|
|
|
- /*
|
|
|
- * Ok, time to look more closely! We need the rq
|
|
|
- * lock now, to be *sure*. If we're wrong, we'll
|
|
|
- * just go back and repeat.
|
|
|
- */
|
|
|
- rq = task_rq_lock(p, &flags);
|
|
|
- running = task_running(rq, p);
|
|
|
- on_rq = p->se.on_rq;
|
|
|
- task_rq_unlock(rq, &flags);
|
|
|
+ /*
|
|
|
+ * Ok, time to look more closely! We need the rq
|
|
|
+ * lock now, to be *sure*. If we're wrong, we'll
|
|
|
+ * just go back and repeat.
|
|
|
+ */
|
|
|
+ rq = task_rq_lock(p, &flags);
|
|
|
+ running = task_running(rq, p);
|
|
|
+ on_rq = p->se.on_rq;
|
|
|
+ task_rq_unlock(rq, &flags);
|
|
|
|
|
|
- /*
|
|
|
- * Was it really running after all now that we
|
|
|
- * checked with the proper locks actually held?
|
|
|
- *
|
|
|
- * Oops. Go back and try again..
|
|
|
- */
|
|
|
- if (unlikely(running)) {
|
|
|
- cpu_relax();
|
|
|
- goto repeat;
|
|
|
- }
|
|
|
+ /*
|
|
|
+ * Was it really running after all now that we
|
|
|
+ * checked with the proper locks actually held?
|
|
|
+ *
|
|
|
+ * Oops. Go back and try again..
|
|
|
+ */
|
|
|
+ if (unlikely(running)) {
|
|
|
+ cpu_relax();
|
|
|
+ continue;
|
|
|
+ }
|
|
|
|
|
|
- /*
|
|
|
- * It's not enough that it's not actively running,
|
|
|
- * it must be off the runqueue _entirely_, and not
|
|
|
- * preempted!
|
|
|
- *
|
|
|
- * So if it wa still runnable (but just not actively
|
|
|
- * running right now), it's preempted, and we should
|
|
|
- * yield - it could be a while.
|
|
|
- */
|
|
|
- if (unlikely(on_rq)) {
|
|
|
- yield();
|
|
|
- goto repeat;
|
|
|
- }
|
|
|
+ /*
|
|
|
+ * It's not enough that it's not actively running,
|
|
|
+ * it must be off the runqueue _entirely_, and not
|
|
|
+ * preempted!
|
|
|
+ *
|
|
|
+ * So if it was still runnable (but just not actively
|
|
|
+ * running right now), it's preempted, and we should
|
|
|
+ * yield - it could be a while.
|
|
|
+ */
|
|
|
+ if (unlikely(on_rq)) {
|
|
|
+ schedule_timeout_uninterruptible(1);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
|
|
|
- /*
|
|
|
- * Ahh, all good. It wasn't running, and it wasn't
|
|
|
- * runnable, which means that it will never become
|
|
|
- * running in the future either. We're all done!
|
|
|
- */
|
|
|
+ /*
|
|
|
+ * Ahh, all good. It wasn't running, and it wasn't
|
|
|
+ * runnable, which means that it will never become
|
|
|
+ * running in the future either. We're all done!
|
|
|
+ */
|
|
|
+ break;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
/***
|
|
@@ -1174,7 +1204,7 @@ void kick_process(struct task_struct *p)
|
|
|
* We want to under-estimate the load of migration sources, to
|
|
|
* balance conservatively.
|
|
|
*/
|
|
|
-static inline unsigned long source_load(int cpu, int type)
|
|
|
+static unsigned long source_load(int cpu, int type)
|
|
|
{
|
|
|
struct rq *rq = cpu_rq(cpu);
|
|
|
unsigned long total = weighted_cpuload(cpu);
|
|
@@ -1189,7 +1219,7 @@ static inline unsigned long source_load(int cpu, int type)
|
|
|
* Return a high guess at the load of a migration-target cpu weighted
|
|
|
* according to the scheduling class and "nice" value.
|
|
|
*/
|
|
|
-static inline unsigned long target_load(int cpu, int type)
|
|
|
+static unsigned long target_load(int cpu, int type)
|
|
|
{
|
|
|
struct rq *rq = cpu_rq(cpu);
|
|
|
unsigned long total = weighted_cpuload(cpu);
|
|
@@ -1231,7 +1261,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
|
|
|
|
|
|
/* Skip over this group if it has no CPUs allowed */
|
|
|
if (!cpus_intersects(group->cpumask, p->cpus_allowed))
|
|
|
- goto nextgroup;
|
|
|
+ continue;
|
|
|
|
|
|
local_group = cpu_isset(this_cpu, group->cpumask);
|
|
|
|
|
@@ -1259,9 +1289,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
|
|
|
min_load = avg_load;
|
|
|
idlest = group;
|
|
|
}
|
|
|
-nextgroup:
|
|
|
- group = group->next;
|
|
|
- } while (group != sd->groups);
|
|
|
+ } while (group = group->next, group != sd->groups);
|
|
|
|
|
|
if (!idlest || 100*this_load < imbalance*min_load)
|
|
|
return NULL;
|
|
@@ -1393,8 +1421,13 @@ static int wake_idle(int cpu, struct task_struct *p)
|
|
|
if (sd->flags & SD_WAKE_IDLE) {
|
|
|
cpus_and(tmp, sd->span, p->cpus_allowed);
|
|
|
for_each_cpu_mask(i, tmp) {
|
|
|
- if (idle_cpu(i))
|
|
|
+ if (idle_cpu(i)) {
|
|
|
+ if (i != task_cpu(p)) {
|
|
|
+ schedstat_inc(p,
|
|
|
+ se.nr_wakeups_idle);
|
|
|
+ }
|
|
|
return i;
|
|
|
+ }
|
|
|
}
|
|
|
} else {
|
|
|
break;
|
|
@@ -1425,7 +1458,7 @@ static inline int wake_idle(int cpu, struct task_struct *p)
|
|
|
*/
|
|
|
static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
|
|
|
{
|
|
|
- int cpu, this_cpu, success = 0;
|
|
|
+ int cpu, orig_cpu, this_cpu, success = 0;
|
|
|
unsigned long flags;
|
|
|
long old_state;
|
|
|
struct rq *rq;
|
|
@@ -1444,6 +1477,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
|
|
|
goto out_running;
|
|
|
|
|
|
cpu = task_cpu(p);
|
|
|
+ orig_cpu = cpu;
|
|
|
this_cpu = smp_processor_id();
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
@@ -1452,7 +1486,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
|
|
|
|
|
|
new_cpu = cpu;
|
|
|
|
|
|
- schedstat_inc(rq, ttwu_cnt);
|
|
|
+ schedstat_inc(rq, ttwu_count);
|
|
|
if (cpu == this_cpu) {
|
|
|
schedstat_inc(rq, ttwu_local);
|
|
|
goto out_set_cpu;
|
|
@@ -1487,6 +1521,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
|
|
|
unsigned long tl = this_load;
|
|
|
unsigned long tl_per_task;
|
|
|
|
|
|
+ /*
|
|
|
+ * Attract cache-cold tasks on sync wakeups:
|
|
|
+ */
|
|
|
+ if (sync && !task_hot(p, rq->clock, this_sd))
|
|
|
+ goto out_set_cpu;
|
|
|
+
|
|
|
+ schedstat_inc(p, se.nr_wakeups_affine_attempts);
|
|
|
tl_per_task = cpu_avg_load_per_task(this_cpu);
|
|
|
|
|
|
/*
|
|
@@ -1506,6 +1547,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
|
|
|
* there is no bad imbalance.
|
|
|
*/
|
|
|
schedstat_inc(this_sd, ttwu_move_affine);
|
|
|
+ schedstat_inc(p, se.nr_wakeups_affine);
|
|
|
goto out_set_cpu;
|
|
|
}
|
|
|
}
|
|
@@ -1517,6 +1559,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
|
|
|
if (this_sd->flags & SD_WAKE_BALANCE) {
|
|
|
if (imbalance*this_load <= 100*load) {
|
|
|
schedstat_inc(this_sd, ttwu_move_balance);
|
|
|
+ schedstat_inc(p, se.nr_wakeups_passive);
|
|
|
goto out_set_cpu;
|
|
|
}
|
|
|
}
|
|
@@ -1542,18 +1585,18 @@ out_set_cpu:
|
|
|
|
|
|
out_activate:
|
|
|
#endif /* CONFIG_SMP */
|
|
|
+ schedstat_inc(p, se.nr_wakeups);
|
|
|
+ if (sync)
|
|
|
+ schedstat_inc(p, se.nr_wakeups_sync);
|
|
|
+ if (orig_cpu != cpu)
|
|
|
+ schedstat_inc(p, se.nr_wakeups_migrate);
|
|
|
+ if (cpu == this_cpu)
|
|
|
+ schedstat_inc(p, se.nr_wakeups_local);
|
|
|
+ else
|
|
|
+ schedstat_inc(p, se.nr_wakeups_remote);
|
|
|
update_rq_clock(rq);
|
|
|
activate_task(rq, p, 1);
|
|
|
- /*
|
|
|
- * Sync wakeups (i.e. those types of wakeups where the waker
|
|
|
- * has indicated that it will leave the CPU in short order)
|
|
|
- * don't trigger a preemption, if the woken up task will run on
|
|
|
- * this cpu. (in this case the 'I will reschedule' promise of
|
|
|
- * the waker guarantees that the freshly woken up task is going
|
|
|
- * to be considered on this CPU.)
|
|
|
- */
|
|
|
- if (!sync || cpu != this_cpu)
|
|
|
- check_preempt_curr(rq, p);
|
|
|
+ check_preempt_curr(rq, p);
|
|
|
success = 1;
|
|
|
|
|
|
out_running:
|
|
@@ -1584,28 +1627,20 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
|
|
|
*/
|
|
|
static void __sched_fork(struct task_struct *p)
|
|
|
{
|
|
|
- p->se.wait_start_fair = 0;
|
|
|
p->se.exec_start = 0;
|
|
|
p->se.sum_exec_runtime = 0;
|
|
|
p->se.prev_sum_exec_runtime = 0;
|
|
|
- p->se.delta_exec = 0;
|
|
|
- p->se.delta_fair_run = 0;
|
|
|
- p->se.delta_fair_sleep = 0;
|
|
|
- p->se.wait_runtime = 0;
|
|
|
- p->se.sleep_start_fair = 0;
|
|
|
|
|
|
#ifdef CONFIG_SCHEDSTATS
|
|
|
p->se.wait_start = 0;
|
|
|
- p->se.sum_wait_runtime = 0;
|
|
|
p->se.sum_sleep_runtime = 0;
|
|
|
p->se.sleep_start = 0;
|
|
|
p->se.block_start = 0;
|
|
|
p->se.sleep_max = 0;
|
|
|
p->se.block_max = 0;
|
|
|
p->se.exec_max = 0;
|
|
|
+ p->se.slice_max = 0;
|
|
|
p->se.wait_max = 0;
|
|
|
- p->se.wait_runtime_overruns = 0;
|
|
|
- p->se.wait_runtime_underruns = 0;
|
|
|
#endif
|
|
|
|
|
|
INIT_LIST_HEAD(&p->run_list);
|
|
@@ -1636,12 +1671,14 @@ void sched_fork(struct task_struct *p, int clone_flags)
|
|
|
#ifdef CONFIG_SMP
|
|
|
cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
|
|
|
#endif
|
|
|
- __set_task_cpu(p, cpu);
|
|
|
+ set_task_cpu(p, cpu);
|
|
|
|
|
|
/*
|
|
|
* Make sure we do not leak PI boosting priority to the child:
|
|
|
*/
|
|
|
p->prio = current->normal_prio;
|
|
|
+ if (!rt_prio(p->prio))
|
|
|
+ p->sched_class = &fair_sched_class;
|
|
|
|
|
|
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
|
|
|
if (likely(sched_info_on()))
|
|
@@ -1657,12 +1694,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
|
|
|
put_cpu();
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * After fork, child runs first. (default) If set to 0 then
|
|
|
- * parent will (try to) run first.
|
|
|
- */
|
|
|
-unsigned int __read_mostly sysctl_sched_child_runs_first = 1;
|
|
|
-
|
|
|
/*
|
|
|
* wake_up_new_task - wake up a newly created task for the first time.
|
|
|
*
|
|
@@ -1674,24 +1705,14 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
struct rq *rq;
|
|
|
- int this_cpu;
|
|
|
|
|
|
rq = task_rq_lock(p, &flags);
|
|
|
BUG_ON(p->state != TASK_RUNNING);
|
|
|
- this_cpu = smp_processor_id(); /* parent's CPU */
|
|
|
update_rq_clock(rq);
|
|
|
|
|
|
p->prio = effective_prio(p);
|
|
|
|
|
|
- if (rt_prio(p->prio))
|
|
|
- p->sched_class = &rt_sched_class;
|
|
|
- else
|
|
|
- p->sched_class = &fair_sched_class;
|
|
|
-
|
|
|
- if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
|
|
|
- (clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
|
|
|
- !current->se.on_rq) {
|
|
|
-
|
|
|
+ if (!p->sched_class->task_new || !current->se.on_rq || !rq->cfs.curr) {
|
|
|
activate_task(rq, p, 0);
|
|
|
} else {
|
|
|
/*
|
|
@@ -1800,7 +1821,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
|
|
|
* with the lock held can cause deadlocks; see schedule() for
|
|
|
* details.)
|
|
|
*/
|
|
|
-static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
|
|
|
+static void finish_task_switch(struct rq *rq, struct task_struct *prev)
|
|
|
__releases(rq->lock)
|
|
|
{
|
|
|
struct mm_struct *mm = rq->prev_mm;
|
|
@@ -1982,42 +2003,10 @@ unsigned long nr_active(void)
|
|
|
*/
|
|
|
static void update_cpu_load(struct rq *this_rq)
|
|
|
{
|
|
|
- u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
|
|
|
- unsigned long total_load = this_rq->ls.load.weight;
|
|
|
- unsigned long this_load = total_load;
|
|
|
- struct load_stat *ls = &this_rq->ls;
|
|
|
+ unsigned long this_load = this_rq->load.weight;
|
|
|
int i, scale;
|
|
|
|
|
|
this_rq->nr_load_updates++;
|
|
|
- if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
|
|
|
- goto do_avg;
|
|
|
-
|
|
|
- /* Update delta_fair/delta_exec fields first */
|
|
|
- update_curr_load(this_rq);
|
|
|
-
|
|
|
- fair_delta64 = ls->delta_fair + 1;
|
|
|
- ls->delta_fair = 0;
|
|
|
-
|
|
|
- exec_delta64 = ls->delta_exec + 1;
|
|
|
- ls->delta_exec = 0;
|
|
|
-
|
|
|
- sample_interval64 = this_rq->clock - ls->load_update_last;
|
|
|
- ls->load_update_last = this_rq->clock;
|
|
|
-
|
|
|
- if ((s64)sample_interval64 < (s64)TICK_NSEC)
|
|
|
- sample_interval64 = TICK_NSEC;
|
|
|
-
|
|
|
- if (exec_delta64 > sample_interval64)
|
|
|
- exec_delta64 = sample_interval64;
|
|
|
-
|
|
|
- idle_delta64 = sample_interval64 - exec_delta64;
|
|
|
-
|
|
|
- tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
|
|
|
- tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
|
|
|
-
|
|
|
- this_load = (unsigned long)tmp64;
|
|
|
-
|
|
|
-do_avg:
|
|
|
|
|
|
/* Update our load: */
|
|
|
for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
|
|
@@ -2027,7 +2016,13 @@ do_avg:
|
|
|
|
|
|
old_load = this_rq->cpu_load[i];
|
|
|
new_load = this_load;
|
|
|
-
|
|
|
+ /*
|
|
|
+ * Round up the averaging division if load is increasing. This
|
|
|
+ * prevents us from getting stuck on 9 if the load is 10, for
|
|
|
+ * example.
|
|
|
+ */
|
|
|
+ if (new_load > old_load)
|
|
|
+ new_load += scale-1;
|
|
|
this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
|
|
|
}
|
|
|
}
|
|
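
The cpu_load[i] values are exponentially decaying averages with progressively longer horizons: new = (old*(2^i - 1) + sample) >> i. With plain integer truncation the average can stall just below a higher constant sample, which is exactly what the added round-up guards against. A quick check of the difference, using index 1 and a constant sample of 10, values chosen only to reproduce the "stuck on 9" case the patch comment mentions:

#include <stdio.h>

/* One update step of the scheduler's decaying cpu_load average at index i. */
static unsigned long step(unsigned long old, unsigned long sample,
			  int i, int round_up)
{
	unsigned long scale = 1UL << i;

	if (round_up && sample > old)
		sample += scale - 1;	/* the round-up added by the patch */
	return (old * (scale - 1) + sample) >> i;
}

int main(void)
{
	unsigned long truncated = 0, rounded = 0;

	for (int n = 0; n < 20; n++) {
		truncated = step(truncated, 10, 1, 0);
		rounded   = step(rounded,   10, 1, 1);
	}
	/* Plain truncation gets stuck on 9 for a constant load of 10;
	 * the round-up variant reaches 10. */
	printf("truncated=%lu rounded=%lu\n", truncated, rounded);
	return 0;
}
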
@@ -2179,13 +2174,38 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
|
|
|
* 2) cannot be migrated to this CPU due to cpus_allowed, or
|
|
|
* 3) are cache-hot on their current CPU.
|
|
|
*/
|
|
|
- if (!cpu_isset(this_cpu, p->cpus_allowed))
|
|
|
+ if (!cpu_isset(this_cpu, p->cpus_allowed)) {
|
|
|
+ schedstat_inc(p, se.nr_failed_migrations_affine);
|
|
|
return 0;
|
|
|
+ }
|
|
|
*all_pinned = 0;
|
|
|
|
|
|
- if (task_running(rq, p))
|
|
|
+ if (task_running(rq, p)) {
|
|
|
+ schedstat_inc(p, se.nr_failed_migrations_running);
|
|
|
return 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Aggressive migration if:
|
|
|
+ * 1) task is cache cold, or
|
|
|
+ * 2) too many balance attempts have failed.
|
|
|
+ */
|
|
|
+
|
|
|
+ if (!task_hot(p, rq->clock, sd) ||
|
|
|
+ sd->nr_balance_failed > sd->cache_nice_tries) {
|
|
|
+#ifdef CONFIG_SCHEDSTATS
|
|
|
+ if (task_hot(p, rq->clock, sd)) {
|
|
|
+ schedstat_inc(sd, lb_hot_gained[idle]);
|
|
|
+ schedstat_inc(p, se.nr_forced_migrations);
|
|
|
+ }
|
|
|
+#endif
|
|
|
+ return 1;
|
|
|
+ }
|
|
|
|
|
|
+ if (task_hot(p, rq->clock, sd)) {
|
|
|
+ schedstat_inc(p, se.nr_failed_migrations_hot);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
return 1;
|
|
|
}
|
|
|
|
|
@@ -2264,7 +2284,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
|
|
struct sched_domain *sd, enum cpu_idle_type idle,
|
|
|
int *all_pinned)
|
|
|
{
|
|
|
- struct sched_class *class = sched_class_highest;
|
|
|
+ const struct sched_class *class = sched_class_highest;
|
|
|
unsigned long total_load_moved = 0;
|
|
|
int this_best_prio = this_rq->curr->prio;
|
|
|
|
|
@@ -2289,7 +2309,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
|
|
static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
|
|
struct sched_domain *sd, enum cpu_idle_type idle)
|
|
|
{
|
|
|
- struct sched_class *class;
|
|
|
+ const struct sched_class *class;
|
|
|
int this_best_prio = MAX_PRIO;
|
|
|
|
|
|
for (class = sched_class_highest; class; class = class->next)
|
|
@@ -2653,7 +2673,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
|
|
|
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
|
|
|
sd_idle = 1;
|
|
|
|
|
|
- schedstat_inc(sd, lb_cnt[idle]);
|
|
|
+ schedstat_inc(sd, lb_count[idle]);
|
|
|
|
|
|
redo:
|
|
|
group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
|
|
@@ -2806,7 +2826,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
|
|
|
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
|
|
|
sd_idle = 1;
|
|
|
|
|
|
- schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
|
|
|
+ schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
|
|
|
redo:
|
|
|
group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
|
|
|
&sd_idle, &cpus, NULL);
|
|
@@ -2940,7 +2960,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
|
|
|
}
|
|
|
|
|
|
if (likely(sd)) {
|
|
|
- schedstat_inc(sd, alb_cnt);
|
|
|
+ schedstat_inc(sd, alb_count);
|
|
|
|
|
|
if (move_one_task(target_rq, target_cpu, busiest_rq,
|
|
|
sd, CPU_IDLE))
|
|
@@ -3033,7 +3053,7 @@ static DEFINE_SPINLOCK(balancing);
|
|
|
*
|
|
|
* Balancing parameters are set up in arch_init_sched_domains.
|
|
|
*/
|
|
|
-static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
|
|
|
+static void rebalance_domains(int cpu, enum cpu_idle_type idle)
|
|
|
{
|
|
|
int balance = 1;
|
|
|
struct rq *rq = cpu_rq(cpu);
|
|
@@ -3279,6 +3299,25 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
|
|
|
cpustat->user = cputime64_add(cpustat->user, tmp);
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Account guest cpu time to a process.
|
|
|
+ * @p: the process that the cpu time gets accounted to
|
|
|
+ * @cputime: the cpu time spent in virtual machine since the last update
|
|
|
+ */
|
|
|
+void account_guest_time(struct task_struct *p, cputime_t cputime)
|
|
|
+{
|
|
|
+ cputime64_t tmp;
|
|
|
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
|
|
|
+
|
|
|
+ tmp = cputime_to_cputime64(cputime);
|
|
|
+
|
|
|
+ p->utime = cputime_add(p->utime, cputime);
|
|
|
+ p->gtime = cputime_add(p->gtime, cputime);
|
|
|
+
|
|
|
+ cpustat->user = cputime64_add(cpustat->user, tmp);
|
|
|
+ cpustat->guest = cputime64_add(cpustat->guest, tmp);
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Account system cpu time to a process.
|
|
|
* @p: the process that the cpu time gets accounted to
|
|
@@ -3292,6 +3331,12 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
|
|
|
struct rq *rq = this_rq();
|
|
|
cputime64_t tmp;
|
|
|
|
|
|
+ if (p->flags & PF_VCPU) {
|
|
|
+ account_guest_time(p, cputime);
|
|
|
+ p->flags &= ~PF_VCPU;
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
p->stime = cputime_add(p->stime, cputime);
|
|
|
|
|
|
/* Add system time to cpustat. */
|
|
@@ -3430,7 +3475,13 @@ static inline void schedule_debug(struct task_struct *prev)
|
|
|
|
|
|
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
|
|
|
|
|
|
- schedstat_inc(this_rq(), sched_cnt);
|
|
|
+ schedstat_inc(this_rq(), sched_count);
|
|
|
+#ifdef CONFIG_SCHEDSTATS
|
|
|
+ if (unlikely(prev->lock_depth >= 0)) {
|
|
|
+ schedstat_inc(this_rq(), bkl_count);
|
|
|
+ schedstat_inc(prev, sched_info.bkl_count);
|
|
|
+ }
|
|
|
+#endif
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -3439,7 +3490,7 @@ static inline void schedule_debug(struct task_struct *prev)
|
|
|
static inline struct task_struct *
|
|
|
pick_next_task(struct rq *rq, struct task_struct *prev)
|
|
|
{
|
|
|
- struct sched_class *class;
|
|
|
+ const struct sched_class *class;
|
|
|
struct task_struct *p;
|
|
|
|
|
|
/*
|
|
@@ -3488,9 +3539,13 @@ need_resched_nonpreemptible:
|
|
|
|
|
|
schedule_debug(prev);
|
|
|
|
|
|
- spin_lock_irq(&rq->lock);
|
|
|
- clear_tsk_need_resched(prev);
|
|
|
+ /*
|
|
|
+ * Do the rq-clock update outside the rq lock:
|
|
|
+ */
|
|
|
+ local_irq_disable();
|
|
|
__update_rq_clock(rq);
|
|
|
+ spin_lock(&rq->lock);
|
|
|
+ clear_tsk_need_resched(prev);
|
|
|
|
|
|
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
|
|
|
if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
|
|
@@ -3550,27 +3605,30 @@ asmlinkage void __sched preempt_schedule(void)
|
|
|
if (likely(ti->preempt_count || irqs_disabled()))
|
|
|
return;
|
|
|
|
|
|
-need_resched:
|
|
|
- add_preempt_count(PREEMPT_ACTIVE);
|
|
|
- /*
|
|
|
- * We keep the big kernel semaphore locked, but we
|
|
|
- * clear ->lock_depth so that schedule() doesnt
|
|
|
- * auto-release the semaphore:
|
|
|
- */
|
|
|
+ do {
|
|
|
+ add_preempt_count(PREEMPT_ACTIVE);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * We keep the big kernel semaphore locked, but we
|
|
|
+ * clear ->lock_depth so that schedule() doesn't
|
|
|
+ * auto-release the semaphore:
|
|
|
+ */
|
|
|
#ifdef CONFIG_PREEMPT_BKL
|
|
|
- saved_lock_depth = task->lock_depth;
|
|
|
- task->lock_depth = -1;
|
|
|
+ saved_lock_depth = task->lock_depth;
|
|
|
+ task->lock_depth = -1;
|
|
|
#endif
|
|
|
- schedule();
|
|
|
+ schedule();
|
|
|
#ifdef CONFIG_PREEMPT_BKL
|
|
|
- task->lock_depth = saved_lock_depth;
|
|
|
+ task->lock_depth = saved_lock_depth;
|
|
|
#endif
|
|
|
- sub_preempt_count(PREEMPT_ACTIVE);
|
|
|
+ sub_preempt_count(PREEMPT_ACTIVE);
|
|
|
|
|
|
- /* we could miss a preemption opportunity between schedule and now */
|
|
|
- barrier();
|
|
|
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
|
|
|
- goto need_resched;
|
|
|
+ /*
|
|
|
+ * Check again in case we missed a preemption opportunity
|
|
|
+ * between schedule and now.
|
|
|
+ */
|
|
|
+ barrier();
|
|
|
+ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
|
|
|
}
|
|
|
EXPORT_SYMBOL(preempt_schedule);
|
|
|
|
|
@@ -3590,29 +3648,32 @@ asmlinkage void __sched preempt_schedule_irq(void)
|
|
|
/* Catch callers which need to be fixed */
|
|
|
BUG_ON(ti->preempt_count || !irqs_disabled());
|
|
|
|
|
|
-need_resched:
|
|
|
- add_preempt_count(PREEMPT_ACTIVE);
|
|
|
- /*
|
|
|
- * We keep the big kernel semaphore locked, but we
|
|
|
- * clear ->lock_depth so that schedule() doesnt
|
|
|
- * auto-release the semaphore:
|
|
|
- */
|
|
|
+ do {
|
|
|
+ add_preempt_count(PREEMPT_ACTIVE);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * We keep the big kernel semaphore locked, but we
|
|
|
+ * clear ->lock_depth so that schedule() doesn't
|
|
|
+ * auto-release the semaphore:
|
|
|
+ */
|
|
|
#ifdef CONFIG_PREEMPT_BKL
|
|
|
- saved_lock_depth = task->lock_depth;
|
|
|
- task->lock_depth = -1;
|
|
|
+ saved_lock_depth = task->lock_depth;
|
|
|
+ task->lock_depth = -1;
|
|
|
#endif
|
|
|
- local_irq_enable();
|
|
|
- schedule();
|
|
|
- local_irq_disable();
|
|
|
+ local_irq_enable();
|
|
|
+ schedule();
|
|
|
+ local_irq_disable();
|
|
|
#ifdef CONFIG_PREEMPT_BKL
|
|
|
- task->lock_depth = saved_lock_depth;
|
|
|
+ task->lock_depth = saved_lock_depth;
|
|
|
#endif
|
|
|
- sub_preempt_count(PREEMPT_ACTIVE);
|
|
|
+ sub_preempt_count(PREEMPT_ACTIVE);
|
|
|
|
|
|
- /* we could miss a preemption opportunity between schedule and now */
|
|
|
- barrier();
|
|
|
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
|
|
|
- goto need_resched;
|
|
|
+ /*
|
|
|
+ * Check again in case we missed a preemption opportunity
|
|
|
+ * between schedule and now.
|
|
|
+ */
|
|
|
+ barrier();
|
|
|
+ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
|
|
|
}
|
|
|
|
|
|
#endif /* CONFIG_PREEMPT */
|
|
@@ -3636,10 +3697,9 @@ EXPORT_SYMBOL(default_wake_function);
|
|
|
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
|
|
|
int nr_exclusive, int sync, void *key)
|
|
|
{
|
|
|
- struct list_head *tmp, *next;
|
|
|
+ wait_queue_t *curr, *next;
|
|
|
|
|
|
- list_for_each_safe(tmp, next, &q->task_list) {
|
|
|
- wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
|
|
|
+ list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
|
|
|
unsigned flags = curr->flags;
|
|
|
|
|
|
if (curr->func(curr, mode, sync, key) &&
|
|
@@ -3729,206 +3789,116 @@ void fastcall complete_all(struct completion *x)
|
|
|
}
|
|
|
EXPORT_SYMBOL(complete_all);
|
|
|
|
|
|
-void fastcall __sched wait_for_completion(struct completion *x)
|
|
|
+static inline long __sched
|
|
|
+do_wait_for_common(struct completion *x, long timeout, int state)
|
|
|
{
|
|
|
- might_sleep();
|
|
|
-
|
|
|
- spin_lock_irq(&x->wait.lock);
|
|
|
if (!x->done) {
|
|
|
DECLARE_WAITQUEUE(wait, current);
|
|
|
|
|
|
wait.flags |= WQ_FLAG_EXCLUSIVE;
|
|
|
__add_wait_queue_tail(&x->wait, &wait);
|
|
|
do {
|
|
|
- __set_current_state(TASK_UNINTERRUPTIBLE);
|
|
|
- spin_unlock_irq(&x->wait.lock);
|
|
|
- schedule();
|
|
|
- spin_lock_irq(&x->wait.lock);
|
|
|
- } while (!x->done);
|
|
|
- __remove_wait_queue(&x->wait, &wait);
|
|
|
- }
|
|
|
- x->done--;
|
|
|
- spin_unlock_irq(&x->wait.lock);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(wait_for_completion);
|
|
|
-
|
|
|
-unsigned long fastcall __sched
|
|
|
-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
|
|
|
-{
|
|
|
- might_sleep();
|
|
|
-
|
|
|
- spin_lock_irq(&x->wait.lock);
|
|
|
- if (!x->done) {
|
|
|
- DECLARE_WAITQUEUE(wait, current);
|
|
|
-
|
|
|
- wait.flags |= WQ_FLAG_EXCLUSIVE;
|
|
|
- __add_wait_queue_tail(&x->wait, &wait);
|
|
|
- do {
|
|
|
- __set_current_state(TASK_UNINTERRUPTIBLE);
|
|
|
+ if (state == TASK_INTERRUPTIBLE &&
|
|
|
+ signal_pending(current)) {
|
|
|
+ __remove_wait_queue(&x->wait, &wait);
|
|
|
+ return -ERESTARTSYS;
|
|
|
+ }
|
|
|
+ __set_current_state(state);
|
|
|
spin_unlock_irq(&x->wait.lock);
|
|
|
timeout = schedule_timeout(timeout);
|
|
|
spin_lock_irq(&x->wait.lock);
|
|
|
if (!timeout) {
|
|
|
__remove_wait_queue(&x->wait, &wait);
|
|
|
- goto out;
|
|
|
+ return timeout;
|
|
|
}
|
|
|
} while (!x->done);
|
|
|
__remove_wait_queue(&x->wait, &wait);
|
|
|
}
|
|
|
x->done--;
|
|
|
-out:
|
|
|
- spin_unlock_irq(&x->wait.lock);
|
|
|
return timeout;
|
|
|
}
|
|
|
-EXPORT_SYMBOL(wait_for_completion_timeout);
|
|
|
|
|
|
-int fastcall __sched wait_for_completion_interruptible(struct completion *x)
|
|
|
+static long __sched
|
|
|
+wait_for_common(struct completion *x, long timeout, int state)
|
|
|
{
|
|
|
- int ret = 0;
|
|
|
-
|
|
|
might_sleep();
|
|
|
|
|
|
spin_lock_irq(&x->wait.lock);
|
|
|
- if (!x->done) {
|
|
|
- DECLARE_WAITQUEUE(wait, current);
|
|
|
-
|
|
|
- wait.flags |= WQ_FLAG_EXCLUSIVE;
|
|
|
- __add_wait_queue_tail(&x->wait, &wait);
|
|
|
- do {
|
|
|
- if (signal_pending(current)) {
|
|
|
- ret = -ERESTARTSYS;
|
|
|
- __remove_wait_queue(&x->wait, &wait);
|
|
|
- goto out;
|
|
|
- }
|
|
|
- __set_current_state(TASK_INTERRUPTIBLE);
|
|
|
- spin_unlock_irq(&x->wait.lock);
|
|
|
- schedule();
|
|
|
- spin_lock_irq(&x->wait.lock);
|
|
|
- } while (!x->done);
|
|
|
- __remove_wait_queue(&x->wait, &wait);
|
|
|
- }
|
|
|
- x->done--;
|
|
|
-out:
|
|
|
+ timeout = do_wait_for_common(x, timeout, state);
|
|
|
spin_unlock_irq(&x->wait.lock);
|
|
|
+ return timeout;
|
|
|
+}
|
|
|
|
|
|
- return ret;
|
|
|
+void fastcall __sched wait_for_completion(struct completion *x)
|
|
|
+{
|
|
|
+ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
|
|
|
}
|
|
|
-EXPORT_SYMBOL(wait_for_completion_interruptible);
|
|
|
+EXPORT_SYMBOL(wait_for_completion);
|
|
|
|
|
|
unsigned long fastcall __sched
|
|
|
-wait_for_completion_interruptible_timeout(struct completion *x,
|
|
|
- unsigned long timeout)
|
|
|
+wait_for_completion_timeout(struct completion *x, unsigned long timeout)
|
|
|
{
|
|
|
- might_sleep();
|
|
|
-
|
|
|
- spin_lock_irq(&x->wait.lock);
|
|
|
- if (!x->done) {
|
|
|
- DECLARE_WAITQUEUE(wait, current);
|
|
|
-
|
|
|
- wait.flags |= WQ_FLAG_EXCLUSIVE;
|
|
|
- __add_wait_queue_tail(&x->wait, &wait);
|
|
|
- do {
|
|
|
- if (signal_pending(current)) {
|
|
|
- timeout = -ERESTARTSYS;
|
|
|
- __remove_wait_queue(&x->wait, &wait);
|
|
|
- goto out;
|
|
|
- }
|
|
|
- __set_current_state(TASK_INTERRUPTIBLE);
|
|
|
- spin_unlock_irq(&x->wait.lock);
|
|
|
- timeout = schedule_timeout(timeout);
|
|
|
- spin_lock_irq(&x->wait.lock);
|
|
|
- if (!timeout) {
|
|
|
- __remove_wait_queue(&x->wait, &wait);
|
|
|
- goto out;
|
|
|
- }
|
|
|
- } while (!x->done);
|
|
|
- __remove_wait_queue(&x->wait, &wait);
|
|
|
- }
|
|
|
- x->done--;
|
|
|
-out:
|
|
|
- spin_unlock_irq(&x->wait.lock);
|
|
|
- return timeout;
|
|
|
+ return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
|
|
|
}
|
|
|
-EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
|
|
|
+EXPORT_SYMBOL(wait_for_completion_timeout);
|
|
|
|
|
|
-static inline void
|
|
|
-sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
|
|
|
+int __sched wait_for_completion_interruptible(struct completion *x)
|
|
|
{
|
|
|
- spin_lock_irqsave(&q->lock, *flags);
|
|
|
- __add_wait_queue(q, wait);
|
|
|
- spin_unlock(&q->lock);
|
|
|
+ return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
|
|
|
}
|
|
|
+EXPORT_SYMBOL(wait_for_completion_interruptible);
|
|
|
|
|
|
-static inline void
|
|
|
-sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
|
|
|
+unsigned long fastcall __sched
|
|
|
+wait_for_completion_interruptible_timeout(struct completion *x,
|
|
|
+ unsigned long timeout)
|
|
|
{
|
|
|
- spin_lock_irq(&q->lock);
|
|
|
- __remove_wait_queue(q, wait);
|
|
|
- spin_unlock_irqrestore(&q->lock, *flags);
|
|
|
+ return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
|
|
|
}
|
|
|
+EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
|
|
|
|
|
|
-void __sched interruptible_sleep_on(wait_queue_head_t *q)
|
|
|
+static long __sched
|
|
|
+sleep_on_common(wait_queue_head_t *q, int state, long timeout)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
wait_queue_t wait;
|
|
|
|
|
|
init_waitqueue_entry(&wait, current);
|
|
|
|
|
|
- current->state = TASK_INTERRUPTIBLE;
|
|
|
+ __set_current_state(state);
|
|
|
|
|
|
- sleep_on_head(q, &wait, &flags);
|
|
|
- schedule();
|
|
|
- sleep_on_tail(q, &wait, &flags);
|
|
|
+ spin_lock_irqsave(&q->lock, flags);
|
|
|
+ __add_wait_queue(q, &wait);
|
|
|
+ spin_unlock(&q->lock);
|
|
|
+ timeout = schedule_timeout(timeout);
|
|
|
+ spin_lock_irq(&q->lock);
|
|
|
+ __remove_wait_queue(q, &wait);
|
|
|
+ spin_unlock_irqrestore(&q->lock, flags);
|
|
|
+
|
|
|
+ return timeout;
|
|
|
+}
|
|
|
+
|
|
|
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
|
|
|
+{
|
|
|
+ sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
|
|
|
}
|
|
|
EXPORT_SYMBOL(interruptible_sleep_on);
|
|
|
|
|
|
long __sched
|
|
|
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
|
|
|
{
|
|
|
- unsigned long flags;
|
|
|
- wait_queue_t wait;
|
|
|
-
|
|
|
- init_waitqueue_entry(&wait, current);
|
|
|
-
|
|
|
- current->state = TASK_INTERRUPTIBLE;
|
|
|
-
|
|
|
- sleep_on_head(q, &wait, &flags);
|
|
|
- timeout = schedule_timeout(timeout);
|
|
|
- sleep_on_tail(q, &wait, &flags);
|
|
|
-
|
|
|
- return timeout;
|
|
|
+ return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
|
|
|
}
|
|
|
EXPORT_SYMBOL(interruptible_sleep_on_timeout);
|
|
|
|
|
|
void __sched sleep_on(wait_queue_head_t *q)
|
|
|
{
|
|
|
- unsigned long flags;
|
|
|
- wait_queue_t wait;
|
|
|
-
|
|
|
- init_waitqueue_entry(&wait, current);
|
|
|
-
|
|
|
- current->state = TASK_UNINTERRUPTIBLE;
|
|
|
-
|
|
|
- sleep_on_head(q, &wait, &flags);
|
|
|
- schedule();
|
|
|
- sleep_on_tail(q, &wait, &flags);
|
|
|
+ sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
|
|
|
}
|
|
|
EXPORT_SYMBOL(sleep_on);
|
|
|
|
|
|
long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
|
|
|
{
|
|
|
- unsigned long flags;
|
|
|
- wait_queue_t wait;
|
|
|
-
|
|
|
- init_waitqueue_entry(&wait, current);
|
|
|
-
|
|
|
- current->state = TASK_UNINTERRUPTIBLE;
|
|
|
-
|
|
|
- sleep_on_head(q, &wait, &flags);
|
|
|
- timeout = schedule_timeout(timeout);
|
|
|
- sleep_on_tail(q, &wait, &flags);
|
|
|
-
|
|
|
- return timeout;
|
|
|
+ return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
|
|
|
}
|
|
|
EXPORT_SYMBOL(sleep_on_timeout);
|
|
|
|
|
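
The four wait_for_completion*() variants above collapse into one worker, do_wait_for_common(), parameterized by a timeout and a task state, and the exported entry points become one-line wrappers; callers see no behavioral change, e.g. the interruptible variant still returns -ERESTARTSYS when a signal arrives first. A reduced, non-blocking sketch of the same wrapper-around-one-worker shape, with toy types that only illustrate the call structure:

#include <stdio.h>

#define MAX_SCHEDULE_TIMEOUT	(~0UL >> 1)
#define TASK_UNINTERRUPTIBLE	2
#define TASK_INTERRUPTIBLE	1

/* Toy completion: 'done' would be set by complete(); only the shape matters. */
struct completion { int done; };

/* One worker holds the real wait logic, as in the patch... */
static long wait_for_common(struct completion *x, long timeout, int state)
{
	printf("waiting (state=%d, timeout=%ld), done=%d\n",
	       state, timeout, x->done);
	return x->done ? timeout : 0;	/* stand-in for the sleep loop */
}

/* ...and the public variants shrink to one-line wrappers. */
static void wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}

static long wait_for_completion_interruptible(struct completion *x)
{
	return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
}

int main(void)
{
	struct completion c = { .done = 1 };

	wait_for_completion(&c);
	return wait_for_completion_interruptible(&c) ? 0 : 1;
}
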
@@ -3947,7 +3917,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
|
|
|
void rt_mutex_setprio(struct task_struct *p, int prio)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
- int oldprio, on_rq;
|
|
|
+ int oldprio, on_rq, running;
|
|
|
struct rq *rq;
|
|
|
|
|
|
BUG_ON(prio < 0 || prio > MAX_PRIO);
|
|
@@ -3957,8 +3927,12 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
|
|
|
|
|
|
oldprio = p->prio;
|
|
|
on_rq = p->se.on_rq;
|
|
|
- if (on_rq)
|
|
|
+ running = task_running(rq, p);
|
|
|
+ if (on_rq) {
|
|
|
dequeue_task(rq, p, 0);
|
|
|
+ if (running)
|
|
|
+ p->sched_class->put_prev_task(rq, p);
|
|
|
+ }
|
|
|
|
|
|
if (rt_prio(prio))
|
|
|
p->sched_class = &rt_sched_class;
|
|
@@ -3968,13 +3942,15 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
|
|
|
p->prio = prio;
|
|
|
|
|
|
if (on_rq) {
|
|
|
+ if (running)
|
|
|
+ p->sched_class->set_curr_task(rq);
|
|
|
enqueue_task(rq, p, 0);
|
|
|
/*
|
|
|
* Reschedule if we are currently running on this runqueue and
|
|
|
* our priority decreased, or if we are not currently running on
|
|
|
* this runqueue and our priority is higher than the current's
|
|
|
*/
|
|
|
- if (task_running(rq, p)) {
|
|
|
+ if (running) {
|
|
|
if (p->prio > oldprio)
|
|
|
resched_task(rq->curr);
|
|
|
} else {
|
|
@@ -4138,7 +4114,7 @@ struct task_struct *idle_task(int cpu)
|
|
|
* find_process_by_pid - find a process with a matching PID value.
|
|
|
* @pid: the pid in question.
|
|
|
*/
|
|
|
-static inline struct task_struct *find_process_by_pid(pid_t pid)
|
|
|
+static struct task_struct *find_process_by_pid(pid_t pid)
|
|
|
{
|
|
|
return pid ? find_task_by_pid(pid) : current;
|
|
|
}
|
|
@@ -4180,7 +4156,7 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
|
|
|
int sched_setscheduler(struct task_struct *p, int policy,
|
|
|
struct sched_param *param)
|
|
|
{
|
|
|
- int retval, oldprio, oldpolicy = -1, on_rq;
|
|
|
+ int retval, oldprio, oldpolicy = -1, on_rq, running;
|
|
|
unsigned long flags;
|
|
|
struct rq *rq;
|
|
|
|
|
@@ -4262,18 +4238,26 @@ recheck:
|
|
|
}
|
|
|
update_rq_clock(rq);
|
|
|
on_rq = p->se.on_rq;
|
|
|
- if (on_rq)
|
|
|
+ running = task_running(rq, p);
|
|
|
+ if (on_rq) {
|
|
|
deactivate_task(rq, p, 0);
|
|
|
+ if (running)
|
|
|
+ p->sched_class->put_prev_task(rq, p);
|
|
|
+ }
|
|
|
+
|
|
|
oldprio = p->prio;
|
|
|
__setscheduler(rq, p, policy, param->sched_priority);
|
|
|
+
|
|
|
if (on_rq) {
|
|
|
+ if (running)
|
|
|
+ p->sched_class->set_curr_task(rq);
|
|
|
activate_task(rq, p, 0);
|
|
|
/*
|
|
|
* Reschedule if we are currently running on this runqueue and
|
|
|
* our priority decreased, or if we are not currently running on
|
|
|
* this runqueue and our priority is higher than the current's
|
|
|
*/
|
|
|
- if (task_running(rq, p)) {
|
|
|
+ if (running) {
|
|
|
if (p->prio > oldprio)
|
|
|
resched_task(rq->curr);
|
|
|
} else {
|
|
@@ -4344,10 +4328,10 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
|
|
|
asmlinkage long sys_sched_getscheduler(pid_t pid)
|
|
|
{
|
|
|
struct task_struct *p;
|
|
|
- int retval = -EINVAL;
|
|
|
+ int retval;
|
|
|
|
|
|
if (pid < 0)
|
|
|
- goto out_nounlock;
|
|
|
+ return -EINVAL;
|
|
|
|
|
|
retval = -ESRCH;
|
|
|
read_lock(&tasklist_lock);
|
|
@@ -4358,8 +4342,6 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
|
|
|
retval = p->policy;
|
|
|
}
|
|
|
read_unlock(&tasklist_lock);
|
|
|
-
|
|
|
-out_nounlock:
|
|
|
return retval;
|
|
|
}
|
|
|
|
|
@@ -4372,10 +4354,10 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
|
|
|
{
|
|
|
struct sched_param lp;
|
|
|
struct task_struct *p;
|
|
|
- int retval = -EINVAL;
|
|
|
+ int retval;
|
|
|
|
|
|
if (!param || pid < 0)
|
|
|
- goto out_nounlock;
|
|
|
+ return -EINVAL;
|
|
|
|
|
|
read_lock(&tasklist_lock);
|
|
|
p = find_process_by_pid(pid);
|
|
@@ -4395,7 +4377,6 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
|
|
|
*/
|
|
|
retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
|
|
|
|
|
|
-out_nounlock:
|
|
|
return retval;
|
|
|
|
|
|
out_unlock:
|
|
@@ -4555,8 +4536,8 @@ asmlinkage long sys_sched_yield(void)
|
|
|
{
|
|
|
struct rq *rq = this_rq_lock();
|
|
|
|
|
|
- schedstat_inc(rq, yld_cnt);
|
|
|
- current->sched_class->yield_task(rq, current);
|
|
|
+ schedstat_inc(rq, yld_count);
|
|
|
+ current->sched_class->yield_task(rq);
|
|
|
|
|
|
/*
|
|
|
* Since we are going to call schedule() anyway, there's
|
|
@@ -4750,11 +4731,12 @@ asmlinkage
|
|
|
long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
|
|
|
{
|
|
|
struct task_struct *p;
|
|
|
- int retval = -EINVAL;
|
|
|
+ unsigned int time_slice;
|
|
|
+ int retval;
|
|
|
struct timespec t;
|
|
|
|
|
|
if (pid < 0)
|
|
|
- goto out_nounlock;
|
|
|
+ return -EINVAL;
|
|
|
|
|
|
retval = -ESRCH;
|
|
|
read_lock(&tasklist_lock);
|
|
@@ -4766,12 +4748,24 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
|
|
|
if (retval)
|
|
|
goto out_unlock;
|
|
|
|
|
|
- jiffies_to_timespec(p->policy == SCHED_FIFO ?
|
|
|
- 0 : static_prio_timeslice(p->static_prio), &t);
|
|
|
+ if (p->policy == SCHED_FIFO)
|
|
|
+ time_slice = 0;
|
|
|
+ else if (p->policy == SCHED_RR)
|
|
|
+ time_slice = DEF_TIMESLICE;
|
|
|
+ else {
|
|
|
+ struct sched_entity *se = &p->se;
|
|
|
+ unsigned long flags;
|
|
|
+ struct rq *rq;
|
|
|
+
|
|
|
+ rq = task_rq_lock(p, &flags);
|
|
|
+ time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
|
|
|
+ task_rq_unlock(rq, &flags);
|
|
|
+ }
|
|
|
read_unlock(&tasklist_lock);
|
|
|
+ jiffies_to_timespec(time_slice, &t);
|
|
|
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
|
|
|
-out_nounlock:
|
|
|
return retval;
|
|
|
+
|
|
|
out_unlock:
|
|
|
read_unlock(&tasklist_lock);
|
|
|
return retval;
|
|
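
After this hunk, sys_sched_rr_get_interval() reports 0 for SCHED_FIFO, DEF_TIMESLICE for SCHED_RR, and a dynamically computed sched_slice() converted with NS_TO_JIFFIES for CFS tasks. The result is observable from userspace through the existing sched_rr_get_interval() wrapper; a minimal query of the calling process:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 means "the calling thread"; the kernel fills in its timeslice. */
	if (sched_rr_get_interval(0, &ts) != 0) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("reported timeslice: %ld.%09ld s\n",
	       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
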
@@ -4900,32 +4894,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
|
|
|
*/
|
|
|
cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
|
|
|
|
|
|
-/*
|
|
|
- * Increase the granularity value when there are more CPUs,
|
|
|
- * because with more CPUs the 'effective latency' as visible
|
|
|
- * to users decreases. But the relationship is not linear,
|
|
|
- * so pick a second-best guess by going with the log2 of the
|
|
|
- * number of CPUs.
|
|
|
- *
|
|
|
- * This idea comes from the SD scheduler of Con Kolivas:
|
|
|
- */
|
|
|
-static inline void sched_init_granularity(void)
|
|
|
-{
|
|
|
- unsigned int factor = 1 + ilog2(num_online_cpus());
|
|
|
- const unsigned long limit = 100000000;
|
|
|
-
|
|
|
- sysctl_sched_min_granularity *= factor;
|
|
|
- if (sysctl_sched_min_granularity > limit)
|
|
|
- sysctl_sched_min_granularity = limit;
|
|
|
-
|
|
|
- sysctl_sched_latency *= factor;
|
|
|
- if (sysctl_sched_latency > limit)
|
|
|
- sysctl_sched_latency = limit;
|
|
|
-
|
|
|
- sysctl_sched_runtime_limit = sysctl_sched_latency;
|
|
|
- sysctl_sched_wakeup_granularity = sysctl_sched_min_granularity / 2;
|
|
|
-}
|
|
|
-
|
|
|
#ifdef CONFIG_SMP
|
|
|
/*
|
|
|
* This is how migration works:
|
|
@@ -5103,35 +5071,34 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 	struct rq *rq;
 	int dest_cpu;
 
-restart:
-	/* On same node? */
-	mask = node_to_cpumask(cpu_to_node(dead_cpu));
-	cpus_and(mask, mask, p->cpus_allowed);
-	dest_cpu = any_online_cpu(mask);
-
-	/* On any allowed CPU? */
-	if (dest_cpu == NR_CPUS)
-		dest_cpu = any_online_cpu(p->cpus_allowed);
-
-	/* No more Mr. Nice Guy. */
-	if (dest_cpu == NR_CPUS) {
-		rq = task_rq_lock(p, &flags);
-		cpus_setall(p->cpus_allowed);
-		dest_cpu = any_online_cpu(p->cpus_allowed);
-		task_rq_unlock(rq, &flags);
+	do {
+		/* On same node? */
+		mask = node_to_cpumask(cpu_to_node(dead_cpu));
+		cpus_and(mask, mask, p->cpus_allowed);
+		dest_cpu = any_online_cpu(mask);
+
+		/* On any allowed CPU? */
+		if (dest_cpu == NR_CPUS)
+			dest_cpu = any_online_cpu(p->cpus_allowed);
+
+		/* No more Mr. Nice Guy. */
+		if (dest_cpu == NR_CPUS) {
+			rq = task_rq_lock(p, &flags);
+			cpus_setall(p->cpus_allowed);
+			dest_cpu = any_online_cpu(p->cpus_allowed);
+			task_rq_unlock(rq, &flags);
 
-		/*
-		 * Don't tell them about moving exiting tasks or
-		 * kernel threads (both mm NULL), since they never
-		 * leave kernel.
-		 */
-		if (p->mm && printk_ratelimit())
-			printk(KERN_INFO "process %d (%s) no "
-			       "longer affine to cpu%d\n",
-			       p->pid, p->comm, dead_cpu);
-	}
-	if (!__migrate_task(p, dead_cpu, dest_cpu))
-		goto restart;
+			/*
+			 * Don't tell them about moving exiting tasks or
+			 * kernel threads (both mm NULL), since they never
+			 * leave kernel.
+			 */
+			if (p->mm && printk_ratelimit())
+				printk(KERN_INFO "process %d (%s) no "
+				       "longer affine to cpu%d\n",
+				       p->pid, p->comm, dead_cpu);
+		}
+	} while (!__migrate_task(p, dead_cpu, dest_cpu));
 }
 
 /*
@@ -5172,6 +5139,20 @@ static void migrate_live_tasks(int src_cpu)
 	write_unlock_irq(&tasklist_lock);
 }
 
+/*
+ * activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static void activate_idle_task(struct task_struct *p, struct rq *rq)
+{
+	update_rq_clock(rq);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	enqueue_task(rq, p, 0);
+	inc_nr_running(p, rq);
+}
+
 /*
  * Schedules idle task to be the next runnable task on current CPU.
  * It does so by boosting its priority to highest possible and adding it to
@@ -5284,14 +5265,23 @@ static struct ctl_table sd_ctl_root[] = {
 static struct ctl_table *sd_alloc_ctl_entry(int n)
 {
 	struct ctl_table *entry =
-		kmalloc(n * sizeof(struct ctl_table), GFP_KERNEL);
-
-	BUG_ON(!entry);
-	memset(entry, 0, n * sizeof(struct ctl_table));
+		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
 
 	return entry;
 }
 
+static void sd_free_ctl_entry(struct ctl_table **tablep)
+{
+	struct ctl_table *entry = *tablep;
+
+	for (entry = *tablep; entry->procname; entry++)
+		if (entry->child)
+			sd_free_ctl_entry(&entry->child);
+
+	kfree(*tablep);
+	*tablep = NULL;
+}
+
 static void
 set_table_entry(struct ctl_table *entry,
 		const char *procname, void *data, int maxlen,
@@ -5307,7 +5297,10 @@ set_table_entry(struct ctl_table *entry,
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
-	struct ctl_table *table = sd_alloc_ctl_entry(14);
+	struct ctl_table *table = sd_alloc_ctl_entry(12);
+
+	if (table == NULL)
+		return NULL;
 
 	set_table_entry(&table[0], "min_interval", &sd->min_interval,
 		sizeof(long), 0644, proc_doulongvec_minmax);
@@ -5327,11 +5320,12 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 		sizeof(int), 0644, proc_dointvec_minmax);
 	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
 		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[10], "cache_nice_tries",
+	set_table_entry(&table[9], "cache_nice_tries",
 		&sd->cache_nice_tries,
 		sizeof(int), 0644, proc_dointvec_minmax);
-	set_table_entry(&table[12], "flags", &sd->flags,
+	set_table_entry(&table[10], "flags", &sd->flags,
 		sizeof(int), 0644, proc_dointvec_minmax);
+	/* &table[11] is terminator */
 
 	return table;
 }
@@ -5346,6 +5340,8 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 	for_each_domain(cpu, sd)
 		domain_num++;
 	entry = table = sd_alloc_ctl_entry(domain_num + 1);
+	if (table == NULL)
+		return NULL;
 
 	i = 0;
 	for_each_domain(cpu, sd) {
@@ -5360,24 +5356,38 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 }
 
 static struct ctl_table_header *sd_sysctl_header;
-static void init_sched_domain_sysctl(void)
+static void register_sched_domain_sysctl(void)
 {
 	int i, cpu_num = num_online_cpus();
 	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
 	char buf[32];
 
+	if (entry == NULL)
+		return;
+
 	sd_ctl_dir[0].child = entry;
 
-	for (i = 0; i < cpu_num; i++, entry++) {
+	for_each_online_cpu(i) {
 		snprintf(buf, 32, "cpu%d", i);
 		entry->procname = kstrdup(buf, GFP_KERNEL);
 		entry->mode = 0555;
 		entry->child = sd_alloc_ctl_cpu_table(i);
+		entry++;
 	}
 	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
 }
+
+static void unregister_sched_domain_sysctl(void)
+{
+	unregister_sysctl_table(sd_sysctl_header);
+	sd_sysctl_header = NULL;
+	sd_free_ctl_entry(&sd_ctl_dir[0].child);
+}
 #else
-static void init_sched_domain_sysctl(void)
+static void register_sched_domain_sysctl(void)
+{
+}
+static void unregister_sched_domain_sysctl(void)
 {
 }
 #endif
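
The rename to register/unregister pairs lets the table be torn down and rebuilt whenever the domains themselves are. A small userspace sketch, assuming the conventional /proc/sys/kernel/sched_domain/cpuN/domainN/ layout that these tables produce (and a kernel configured with the debug sysctl support), which reads back one of the per-domain knobs:

	#include <stdio.h>

	int main(void)
	{
		/* Path is an assumption based on the cpu%d/domain%d entries
		 * built above; it is only present on debug-enabled kernels. */
		FILE *f = fopen("/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval", "r");
		char buf[64];

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("cpu0/domain0 min_interval: %s", buf);
		fclose(f);
		return 0;
	}
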
@@ -5499,8 +5509,7 @@ int __init migration_init(void)
 int nr_cpu_ids __read_mostly = NR_CPUS;
 EXPORT_SYMBOL(nr_cpu_ids);
 
-#undef SCHED_DOMAIN_DEBUG
-#ifdef SCHED_DOMAIN_DEBUG
+#ifdef CONFIG_SCHED_DEBUG
 static void sched_domain_debug(struct sched_domain *sd, int cpu)
 {
 	int level = 0;
@@ -5558,16 +5567,19 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 			printk("\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
+			break;
 		}
 
 		if (!cpus_weight(group->cpumask)) {
 			printk("\n");
 			printk(KERN_ERR "ERROR: empty group\n");
+			break;
 		}
 
 		if (cpus_intersects(groupmask, group->cpumask)) {
 			printk("\n");
 			printk(KERN_ERR "ERROR: repeated CPUs\n");
+			break;
 		}
 
 		cpus_or(groupmask, groupmask, group->cpumask);
@@ -5701,7 +5713,7 @@ static int __init isolated_cpu_setup(char *str)
 	return 1;
 }
 
-__setup ("isolcpus=", isolated_cpu_setup);
+__setup("isolcpus=", isolated_cpu_setup);
 
 /*
  * init_sched_build_groups takes the cpumask we wish to span, and a pointer
@@ -5930,24 +5942,23 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 
 	if (!sg)
 		return;
-next_sg:
-	for_each_cpu_mask(j, sg->cpumask) {
-		struct sched_domain *sd;
+	do {
+		for_each_cpu_mask(j, sg->cpumask) {
+			struct sched_domain *sd;
 
-		sd = &per_cpu(phys_domains, j);
-		if (j != first_cpu(sd->groups->cpumask)) {
-			/*
-			 * Only add "power" once for each
-			 * physical package.
-			 */
-			continue;
-		}
+			sd = &per_cpu(phys_domains, j);
+			if (j != first_cpu(sd->groups->cpumask)) {
+				/*
+				 * Only add "power" once for each
+				 * physical package.
+				 */
+				continue;
+			}
 
-		sg_inc_cpu_power(sg, sd->groups->__cpu_power);
-	}
-	sg = sg->next;
-	if (sg != group_head)
-		goto next_sg;
+			sg_inc_cpu_power(sg, sd->groups->__cpu_power);
+		}
+		sg = sg->next;
+	} while (sg != group_head);
 }
 #endif
 
@@ -6058,7 +6069,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kzalloc(sizeof(struct sched_group *)*MAX_NUMNODES,
+	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
 					   GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -6311,6 +6322,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 
 	err = build_sched_domains(&cpu_default_map);
 
+	register_sched_domain_sysctl();
+
 	return err;
 }
 
@@ -6327,6 +6340,8 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 {
 	int i;
 
+	unregister_sched_domain_sysctl();
+
 	for_each_cpu_mask(i, *cpu_map)
 		cpu_attach_domain(NULL, i);
 	synchronize_sched();
@@ -6357,6 +6372,8 @@ int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
 	if (!err && !cpus_empty(*partition2))
 		err = build_sched_domains(partition2);
 
+	register_sched_domain_sysctl();
+
 	return err;
 }
 
@@ -6488,17 +6505,13 @@ void __init sched_init_smp(void)
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
 
-	init_sched_domain_sysctl();
-
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed(current, non_isolated_cpus) < 0)
 		BUG();
-	sched_init_granularity();
 }
 #else
 void __init sched_init_smp(void)
 {
-	sched_init_granularity();
 }
 #endif /* CONFIG_SMP */
 
@@ -6512,28 +6525,20 @@ int in_sched_functions(unsigned long addr)
 		&& addr < (unsigned long)__sched_text_end);
 }
 
-static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
-	cfs_rq->fair_clock = 1;
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->rq = rq;
 #endif
+	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 }
 
 void __init sched_init(void)
 {
-	u64 now = sched_clock();
 	int highest_cpu = 0;
 	int i, j;
 
-	/*
-	 * Link up the scheduling class hierarchy:
-	 */
-	rt_sched_class.next = &fair_sched_class;
-	fair_sched_class.next = &idle_sched_class;
-	idle_sched_class.next = NULL;
-
 	for_each_possible_cpu(i) {
 		struct rt_prio_array *array;
 		struct rq *rq;
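
The new min_vruntime seed, (u64)(-(1LL << 20)), starts each cfs_rq about a millisecond before the 64-bit counter wraps, presumably so that the wrap-around path is exercised shortly after boot rather than only after weeks of uptime. CFS compares vruntimes through signed 64-bit differences (in the fair-class code, outside this hunk), which stays correct across the wrap; a standalone sketch of that comparison, with illustrative values:

	#include <stdint.h>
	#include <stdio.h>

	/* Signed-difference compare: correct even when u64 vruntimes wrap. */
	static int before(uint64_t a, uint64_t b)
	{
		return (int64_t)(a - b) < 0;
	}

	int main(void)
	{
		uint64_t start = (uint64_t)(-(1LL << 20));	/* as in init_cfs_rq() */
		uint64_t later = start + (3ULL << 20);		/* already wrapped past 0 */

		printf("start < later: %d\n", before(start, later));	/* prints 1 */
		return 0;
	}
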
@@ -6546,10 +6551,28 @@ void __init sched_init(void)
 		init_cfs_rq(&rq->cfs, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
-		list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+		{
+			struct cfs_rq *cfs_rq = &per_cpu(init_cfs_rq, i);
+			struct sched_entity *se =
+					&per_cpu(init_sched_entity, i);
+
+			init_cfs_rq_p[i] = cfs_rq;
+			init_cfs_rq(cfs_rq, rq);
+			cfs_rq->tg = &init_task_group;
+			list_add(&cfs_rq->leaf_cfs_rq_list,
+						 &rq->leaf_cfs_rq_list);
+
+			init_sched_entity_p[i] = se;
+			se->cfs_rq = &rq->cfs;
+			se->my_q = cfs_rq;
+			se->load.weight = init_task_group_load;
+			se->load.inv_weight =
+					div64_64(1ULL<<32, init_task_group_load);
+			se->parent = NULL;
+		}
+		init_task_group.shares = init_task_group_load;
+		spin_lock_init(&init_task_group.lock);
 #endif
-		rq->ls.load_update_last = now;
-		rq->ls.load_update_start = now;
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
 			rq->cpu_load[j] = 0;
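
The precomputed inv_weight = 2^32 / weight stored next to each entity's weight lets later accounting turn a division by the weight into a multiply and a shift. A self-contained sketch of that trick (values are illustrative; the in-kernel user of inv_weight lives outside this hunk):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t weight = 1024;			/* the nice-0 weight scale */
		uint64_t inv_weight = (1ULL << 32) / weight;
		uint64_t delta_exec = 123456;		/* nanoseconds of runtime */

		/* (delta * inv_weight) >> 32 approximates delta / weight */
		uint64_t scaled = (delta_exec * inv_weight) >> 32;

		printf("%llu ~= %llu\n",
		       (unsigned long long)scaled,
		       (unsigned long long)(delta_exec / weight));
		return 0;
	}
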
@@ -6634,26 +6657,40 @@ EXPORT_SYMBOL(__might_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
+static void normalize_task(struct rq *rq, struct task_struct *p)
+{
+	int on_rq;
+	update_rq_clock(rq);
+	on_rq = p->se.on_rq;
+	if (on_rq)
+		deactivate_task(rq, p, 0);
+	__setscheduler(rq, p, SCHED_NORMAL, 0);
+	if (on_rq) {
+		activate_task(rq, p, 0);
+		resched_task(rq->curr);
+	}
+}
+
 void normalize_rt_tasks(void)
 {
 	struct task_struct *g, *p;
 	unsigned long flags;
 	struct rq *rq;
-	int on_rq;
 
 	read_lock_irq(&tasklist_lock);
 	do_each_thread(g, p) {
-		p->se.fair_key = 0;
-		p->se.wait_runtime = 0;
+		/*
+		 * Only normalize user tasks:
+		 */
+		if (!p->mm)
+			continue;
+
 		p->se.exec_start = 0;
-		p->se.wait_start_fair = 0;
-		p->se.sleep_start_fair = 0;
 #ifdef CONFIG_SCHEDSTATS
 		p->se.wait_start = 0;
 		p->se.sleep_start = 0;
 		p->se.block_start = 0;
 #endif
-		task_rq(p)->cfs.fair_clock = 0;
 		task_rq(p)->clock = 0;
 
 		if (!rt_task(p)) {
@@ -6668,26 +6705,9 @@ void normalize_rt_tasks(void)
 
 		spin_lock_irqsave(&p->pi_lock, flags);
 		rq = __task_rq_lock(p);
-#ifdef CONFIG_SMP
-		/*
-		 * Do not touch the migration thread:
-		 */
-		if (p == rq->migration_thread)
-			goto out_unlock;
-#endif
 
-		update_rq_clock(rq);
-		on_rq = p->se.on_rq;
-		if (on_rq)
-			deactivate_task(rq, p, 0);
-		__setscheduler(rq, p, SCHED_NORMAL, 0);
-		if (on_rq) {
-			activate_task(rq, p, 0);
-			resched_task(rq->curr);
-		}
-#ifdef CONFIG_SMP
-	out_unlock:
-#endif
+		normalize_task(rq, p);
+
 		__task_rq_unlock(rq);
 		spin_unlock_irqrestore(&p->pi_lock, flags);
 	} while_each_thread(g, p);
@@ -6740,3 +6760,201 @@ void set_curr_task(int cpu, struct task_struct *p)
 }
 
 #endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+/* allocate runqueue etc for a new task group */
+struct task_group *sched_create_group(void)
+{
+	struct task_group *tg;
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se;
+	struct rq *rq;
+	int i;
+
+	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+	if (!tg)
+		return ERR_PTR(-ENOMEM);
+
+	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL);
+	if (!tg->cfs_rq)
+		goto err;
+	tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL);
+	if (!tg->se)
+		goto err;
+
+	for_each_possible_cpu(i) {
+		rq = cpu_rq(i);
+
+		cfs_rq = kmalloc_node(sizeof(struct cfs_rq), GFP_KERNEL,
+							 cpu_to_node(i));
+		if (!cfs_rq)
+			goto err;
+
+		se = kmalloc_node(sizeof(struct sched_entity), GFP_KERNEL,
+							cpu_to_node(i));
+		if (!se)
+			goto err;
+
+		memset(cfs_rq, 0, sizeof(struct cfs_rq));
+		memset(se, 0, sizeof(struct sched_entity));
+
+		tg->cfs_rq[i] = cfs_rq;
+		init_cfs_rq(cfs_rq, rq);
+		cfs_rq->tg = tg;
+
+		tg->se[i] = se;
+		se->cfs_rq = &rq->cfs;
+		se->my_q = cfs_rq;
+		se->load.weight = NICE_0_LOAD;
+		se->load.inv_weight = div64_64(1ULL<<32, NICE_0_LOAD);
+		se->parent = NULL;
+	}
+
+	for_each_possible_cpu(i) {
+		rq = cpu_rq(i);
+		cfs_rq = tg->cfs_rq[i];
+		list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+	}
+
+	tg->shares = NICE_0_LOAD;
+	spin_lock_init(&tg->lock);
+
+	return tg;
+
+err:
+	for_each_possible_cpu(i) {
+		if (tg->cfs_rq)
+			kfree(tg->cfs_rq[i]);
+		if (tg->se)
+			kfree(tg->se[i]);
+	}
+	kfree(tg->cfs_rq);
+	kfree(tg->se);
+	kfree(tg);
+
+	return ERR_PTR(-ENOMEM);
+}
+
+/* rcu callback to free various structures associated with a task group */
+static void free_sched_group(struct rcu_head *rhp)
+{
+	struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
+	struct task_group *tg = cfs_rq->tg;
+	struct sched_entity *se;
+	int i;
+
+	/* now it should be safe to free those cfs_rqs */
+	for_each_possible_cpu(i) {
+		cfs_rq = tg->cfs_rq[i];
+		kfree(cfs_rq);
+
+		se = tg->se[i];
+		kfree(se);
+	}
+
+	kfree(tg->cfs_rq);
+	kfree(tg->se);
+	kfree(tg);
+}
+
+/* Destroy runqueue etc associated with a task group */
+void sched_destroy_group(struct task_group *tg)
+{
+	struct cfs_rq *cfs_rq;
+	int i;
+
+	for_each_possible_cpu(i) {
+		cfs_rq = tg->cfs_rq[i];
+		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
+	}
+
+	cfs_rq = tg->cfs_rq[0];
+
+	/* wait for possible concurrent references to cfs_rqs complete */
+	call_rcu(&cfs_rq->rcu, free_sched_group);
+}
+
+/* change task's runqueue when it moves between groups.
+ * The caller of this function should have put the task in its new group
+ * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
+ * reflect its new group.
+ */
+void sched_move_task(struct task_struct *tsk)
+{
+	int on_rq, running;
+	unsigned long flags;
+	struct rq *rq;
+
+	rq = task_rq_lock(tsk, &flags);
+
+	if (tsk->sched_class != &fair_sched_class)
+		goto done;
+
+	update_rq_clock(rq);
+
+	running = task_running(rq, tsk);
+	on_rq = tsk->se.on_rq;
+
+	if (on_rq) {
+		dequeue_task(rq, tsk, 0);
+		if (unlikely(running))
+			tsk->sched_class->put_prev_task(rq, tsk);
+	}
+
+	set_task_cfs_rq(tsk);
+
+	if (on_rq) {
+		if (unlikely(running))
+			tsk->sched_class->set_curr_task(rq);
+		enqueue_task(rq, tsk, 0);
+	}
+
+done:
+	task_rq_unlock(rq, &flags);
+}
+
+static void set_se_shares(struct sched_entity *se, unsigned long shares)
+{
+	struct cfs_rq *cfs_rq = se->cfs_rq;
+	struct rq *rq = cfs_rq->rq;
+	int on_rq;
+
+	spin_lock_irq(&rq->lock);
+
+	on_rq = se->on_rq;
+	if (on_rq)
+		dequeue_entity(cfs_rq, se, 0);
+
+	se->load.weight = shares;
+	se->load.inv_weight = div64_64((1ULL<<32), shares);
+
+	if (on_rq)
+		enqueue_entity(cfs_rq, se, 0);
+
+	spin_unlock_irq(&rq->lock);
+}
+
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+{
+	int i;
+
+	spin_lock(&tg->lock);
+	if (tg->shares == shares)
+		goto done;
+
+	tg->shares = shares;
+	for_each_possible_cpu(i)
+		set_se_shares(tg->se[i], shares);
+
+done:
+	spin_unlock(&tg->lock);
+	return 0;
+}
+
+unsigned long sched_group_shares(struct task_group *tg)
+{
+	return tg->shares;
+}
+
+#endif /* CONFIG_FAIR_GROUP_SCHED */
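
The block above gives the kernel a minimal task-group API: sched_create_group(), sched_group_set_shares()/sched_group_shares(), sched_move_task() and sched_destroy_group(). A hedged in-kernel usage sketch follows; the function name and the 2 * NICE_0_LOAD value are illustrative only, and the real callers (the per-user grouping hooks) live outside this hunk:

	/* Hypothetical caller, shown only to illustrate the API added above. */
	static int example_boost_group(struct task_struct *tsk)
	{
		struct task_group *tg = sched_create_group();

		if (IS_ERR(tg))
			return PTR_ERR(tg);

		/* give the group twice the weight of a single nice-0 task */
		sched_group_set_shares(tg, 2 * NICE_0_LOAD);

		/* the caller is assumed to have recorded tsk's new group
		 * already; sched_move_task() then re-attaches its entity */
		sched_move_task(tsk);

		return 0;
	}
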