@@ -66,6 +66,10 @@
 #include <linux/unistd.h>
 #include <linux/pagemap.h>
 #include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/bootmem.h>
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -114,6 +118,11 @@ unsigned long long __attribute__((weak)) sched_clock(void)
  */
 #define DEF_TIMESLICE		(100 * HZ / 1000)
 
+/*
+ * single value that denotes runtime == period, ie unlimited time.
+ */
+#define RUNTIME_INF	((u64)~0ULL)
+
 #ifdef CONFIG_SMP
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
@@ -155,6 +164,84 @@ struct rt_prio_array {
 	struct list_head queue[MAX_RT_PRIO];
 };
 
+struct rt_bandwidth {
+	/* nests inside the rq lock: */
+	spinlock_t		rt_runtime_lock;
+	ktime_t			rt_period;
+	u64			rt_runtime;
+	struct hrtimer		rt_period_timer;
+};
+
+static struct rt_bandwidth def_rt_bandwidth;
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
+
+static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
+{
+	struct rt_bandwidth *rt_b =
+		container_of(timer, struct rt_bandwidth, rt_period_timer);
+	ktime_t now;
+	int overrun;
+	int idle = 0;
+
+	for (;;) {
+		now = hrtimer_cb_get_time(timer);
+		overrun = hrtimer_forward(timer, now, rt_b->rt_period);
+
+		if (!overrun)
+			break;
+
+		idle = do_sched_rt_period_timer(rt_b, overrun);
+	}
+
+	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
+}
+
+static
+void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
+{
+	rt_b->rt_period = ns_to_ktime(period);
+	rt_b->rt_runtime = runtime;
+
+	spin_lock_init(&rt_b->rt_runtime_lock);
+
+	hrtimer_init(&rt_b->rt_period_timer,
+			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rt_b->rt_period_timer.function = sched_rt_period_timer;
+	rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+}
+
+static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
+{
+	ktime_t now;
+
+	if (rt_b->rt_runtime == RUNTIME_INF)
+		return;
+
+	if (hrtimer_active(&rt_b->rt_period_timer))
+		return;
+
+	spin_lock(&rt_b->rt_runtime_lock);
+	for (;;) {
+		if (hrtimer_active(&rt_b->rt_period_timer))
+			break;
+
+		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
+		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
+		hrtimer_start(&rt_b->rt_period_timer,
+				rt_b->rt_period_timer.expires,
+				HRTIMER_MODE_ABS);
+	}
+	spin_unlock(&rt_b->rt_runtime_lock);
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
+{
+	hrtimer_cancel(&rt_b->rt_period_timer);
+}
+#endif
+
 #ifdef CONFIG_GROUP_SCHED
 
 #include <linux/cgroup.h>
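Note on the hunk above: def_rt_bandwidth is only declared here; the call that seeds it is not part of this hunk. As a hedged sketch only (the call site and arguments are assumptions, not shown in this diff), it would presumably be initialized from the global sysctl values via the helpers this patch introduces later:

	init_rt_bandwidth(&def_rt_bandwidth,
			global_rt_period(), global_rt_runtime());

start_rt_bandwidth() then arms rt_period_timer on demand, skipping the unlimited (RUNTIME_INF) case, and sched_rt_period_timer() re-forwards the timer by rt_period until do_sched_rt_period_timer() reports the queue idle.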
@@ -181,29 +268,39 @@ struct task_group {
 	struct sched_rt_entity **rt_se;
 	struct rt_rq **rt_rq;
 
-	u64 rt_runtime;
+	struct rt_bandwidth rt_bandwidth;
 #endif
 
 	struct rcu_head rcu;
 	struct list_head list;
+
+	struct task_group *parent;
+	struct list_head siblings;
+	struct list_head children;
 };
 
+#ifdef CONFIG_USER_SCHED
+
+/*
+ * Root task group.
+ *	Every UID task group (including init_task_group aka UID-0) will
+ *	be a child to this group.
+ */
+struct task_group root_task_group;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
 static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
-
-static struct sched_entity *init_sched_entity_p[NR_CPUS];
-static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
 static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
-
-static struct sched_rt_entity *init_sched_rt_entity_p[NR_CPUS];
-static struct rt_rq *init_rt_rq_p[NR_CPUS];
+#endif
+#else
+#define root_task_group init_task_group
 #endif
 
 /* task_group_lock serializes add/remove of task groups and also changes to
@@ -221,23 +318,15 @@ static DEFINE_MUTEX(doms_cur_mutex);
 # define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 #endif
 
+#define MIN_SHARES	2
+
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 #endif
 
 /* Default task group.
  *	Every task in system belong to this group at bootup.
  */
-struct task_group init_task_group = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	.se	= init_sched_entity_p,
-	.cfs_rq = init_cfs_rq_p,
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	.rt_se	= init_sched_rt_entity_p,
-	.rt_rq	= init_rt_rq_p,
-#endif
-};
+struct task_group init_task_group;
 
 /* return group to which a task belongs */
 static inline struct task_group *task_group(struct task_struct *p)
@@ -297,8 +386,12 @@ struct cfs_rq {
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
-	struct rb_node *rb_load_balance_curr;
-	/* 'curr' points to currently running entity on this cfs_rq.
+
+	struct list_head tasks;
+	struct list_head *balance_iterator;
+
+	/*
+	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
 	struct sched_entity *curr, *next;
@@ -318,6 +411,43 @@ struct cfs_rq {
 	 */
 	struct list_head leaf_cfs_rq_list;
 	struct task_group *tg;	/* group that "owns" this runqueue */
+
+#ifdef CONFIG_SMP
+	unsigned long task_weight;
+	unsigned long shares;
+	/*
+	 * We need space to build a sched_domain wide view of the full task
+	 * group tree, in order to avoid depending on dynamic memory allocation
+	 * during the load balancing we place this in the per cpu task group
+	 * hierarchy. This limits the load balancing to one instance per cpu,
+	 * but more should not be needed anyway.
+	 */
+	struct aggregate_struct {
+		/*
+		 * load = weight(cpus) * f(tg)
+		 *
+		 * Where f(tg) is the recursive weight fraction assigned to
+		 * this group.
+		 */
+		unsigned long load;
+
+		/*
+		 * part of the group weight distributed to this span.
+		 */
+		unsigned long shares;
+
+		/*
+		 * The sum of all runqueue weights within this span.
+		 */
+		unsigned long rq_weight;
+
+		/*
+		 * Weight contributed by tasks; this is the part we can
+		 * influence by moving tasks around.
+		 */
+		unsigned long task_weight;
+	} aggregate;
+#endif
 #endif
 };
 
@@ -334,6 +464,9 @@ struct rt_rq {
 #endif
 	int rt_throttled;
 	u64 rt_time;
+	u64 rt_runtime;
+	/* Nests inside the rq lock: */
+	spinlock_t rt_runtime_lock;
 
 #ifdef CONFIG_RT_GROUP_SCHED
 	unsigned long rt_nr_boosted;
@@ -396,6 +529,7 @@ struct rq {
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned char idle_at_tick;
 #ifdef CONFIG_NO_HZ
+	unsigned long last_tick_seen;
 	unsigned char in_nohz_recently;
 #endif
 	/* capture load from *all* tasks on this cpu: */
@@ -405,8 +539,6 @@ struct rq {
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
-	u64 rt_period_expire;
-	int rt_throttled;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
@@ -499,6 +631,32 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#ifdef CONFIG_NO_HZ
+static inline bool nohz_on(int cpu)
+{
+	return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
+}
+
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+	return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+	rq->last_tick_seen = jiffies;
+}
+#else
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+	return 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+}
+#endif
+
 /*
  * Update the per-runqueue clock, as finegrained as the platform can give
  * us, but without assuming monotonicity, etc.:
@@ -523,9 +681,12 @@ static void __update_rq_clock(struct rq *rq)
 		/*
 		 * Catch too large forward jumps too:
 		 */
-		if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
-			if (clock < rq->tick_timestamp + TICK_NSEC)
-				clock = rq->tick_timestamp + TICK_NSEC;
+		u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
+		u64 max_time = rq->tick_timestamp + max_jump;
+
+		if (unlikely(clock + delta > max_time)) {
+			if (clock < max_time)
+				clock = max_time;
 			else
 				clock++;
 			rq->clock_overflows++;
@@ -561,23 +722,6 @@ static void update_rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-unsigned long rt_needs_cpu(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	u64 delta;
-
-	if (!rq->rt_throttled)
-		return 0;
-
-	if (rq->clock > rq->rt_period_expire)
-		return 1;
-
-	delta = rq->rt_period_expire - rq->clock;
-	do_div(delta, NSEC_PER_SEC / HZ);
-
-	return (unsigned long)delta;
-}
-
 /*
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
@@ -590,22 +734,137 @@ unsigned long rt_needs_cpu(int cpu)
 /*
  * Debugging: various feature bits
  */
+
+#define SCHED_FEAT(name, enabled)	\
+	__SCHED_FEAT_##name ,
+
 enum {
-	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
-	SCHED_FEAT_WAKEUP_PREEMPT	= 2,
-	SCHED_FEAT_START_DEBIT		= 4,
-	SCHED_FEAT_HRTICK		= 8,
-	SCHED_FEAT_DOUBLE_TICK		= 16,
+#include "sched_features.h"
 };
 
+#undef SCHED_FEAT
+
+#define SCHED_FEAT(name, enabled)	\
+	(1UL << __SCHED_FEAT_##name) * enabled |
+
 const_debug unsigned int sysctl_sched_features =
-		SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
-		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
-		SCHED_FEAT_START_DEBIT		* 1 |
-		SCHED_FEAT_HRTICK		* 1 |
-		SCHED_FEAT_DOUBLE_TICK		* 0;
+#include "sched_features.h"
+	0;
+
+#undef SCHED_FEAT
+
+#ifdef CONFIG_SCHED_DEBUG
+#define SCHED_FEAT(name, enabled)	\
+	#name ,
 
-#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
+__read_mostly char *sched_feat_names[] = {
+#include "sched_features.h"
+	NULL
+};
+
+#undef SCHED_FEAT
+
+int sched_feat_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+sched_feat_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	char *buf;
+	int r = 0;
+	int len = 0;
+	int i;
+
+	for (i = 0; sched_feat_names[i]; i++) {
+		len += strlen(sched_feat_names[i]);
+		len += 4;
+	}
+
+	buf = kmalloc(len + 2, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; sched_feat_names[i]; i++) {
+		if (sysctl_sched_features & (1UL << i))
+			r += sprintf(buf + r, "%s ", sched_feat_names[i]);
+		else
+			r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
+	}
+
+	r += sprintf(buf + r, "\n");
+	WARN_ON(r >= len + 2);
+
+	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+
+	kfree(buf);
+
+	return r;
+}
+
+static ssize_t
+sched_feat_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	char *cmp = buf;
+	int neg = 0;
+	int i;
+
+	if (cnt > 63)
+		cnt = 63;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	if (strncmp(buf, "NO_", 3) == 0) {
+		neg = 1;
+		cmp += 3;
+	}
+
+	for (i = 0; sched_feat_names[i]; i++) {
+		int len = strlen(sched_feat_names[i]);
+
+		if (strncmp(cmp, sched_feat_names[i], len) == 0) {
+			if (neg)
+				sysctl_sched_features &= ~(1UL << i);
+			else
+				sysctl_sched_features |= (1UL << i);
+			break;
+		}
+	}
+
+	if (!sched_feat_names[i])
+		return -EINVAL;
+
+	filp->f_pos += cnt;
+
+	return cnt;
+}
+
+static struct file_operations sched_feat_fops = {
+	.open	= sched_feat_open,
+	.read	= sched_feat_read,
+	.write	= sched_feat_write,
+};
+
+static __init int sched_init_debug(void)
+{
+	debugfs_create_file("sched_features", 0644, NULL, NULL,
+			&sched_feat_fops);
+
+	return 0;
+}
+late_initcall(sched_init_debug);
+
+#endif
+
+#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 
 /*
  * Number of tasks to iterate in a single balance run.
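The three back-to-back includes of "sched_features.h" above work by redefining SCHED_FEAT() before each include: once to build the __SCHED_FEAT_* enum, once to build the default sysctl_sched_features bitmask, and once (under CONFIG_SCHED_DEBUG) to build the name table used by the debugfs file. The header itself is not part of this diff; judging by the feature bits removed above, its entries presumably look like this (a sketch, not the verbatim file):

	SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
	SCHED_FEAT(WAKEUP_PREEMPT, 1)
	SCHED_FEAT(START_DEBIT, 1)
	SCHED_FEAT(HRTICK, 1)
	SCHED_FEAT(DOUBLE_TICK, 0)

With CONFIG_SCHED_DEBUG enabled, sched_feat_write() lets a feature be flipped at run time by writing its name, or the name prefixed with NO_, to the debugfs file, e.g. echo NO_HRTICK > /sys/kernel/debug/sched_features.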
@@ -627,16 +886,52 @@ static __read_mostly int scheduler_running;
  */
 int sysctl_sched_rt_runtime = 950000;
 
-/*
- * single value that denotes runtime == period, ie unlimited time.
- */
-#define RUNTIME_INF	((u64)~0ULL)
+static inline u64 global_rt_period(void)
+{
+	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
+}
+
+static inline u64 global_rt_runtime(void)
+{
+	if (sysctl_sched_rt_period < 0)
+		return RUNTIME_INF;
+
+	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
+}
+
+static const unsigned long long time_sync_thresh = 100000;
+
+static DEFINE_PER_CPU(unsigned long long, time_offset);
+static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
 
 /*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
+ * Global lock which we take every now and then to synchronize
+ * the CPUs time. This method is not warp-safe, but it's good
+ * enough to synchronize slowly diverging time sources and thus
+ * it's good enough for tracing:
  */
-unsigned long long cpu_clock(int cpu)
+static DEFINE_SPINLOCK(time_sync_lock);
+static unsigned long long prev_global_time;
+
+static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&time_sync_lock, flags);
+
+	if (time < prev_global_time) {
+		per_cpu(time_offset, cpu) += prev_global_time - time;
+		time = prev_global_time;
+	} else {
+		prev_global_time = time;
+	}
+
+	spin_unlock_irqrestore(&time_sync_lock, flags);
+
+	return time;
+}
+
+static unsigned long long __cpu_clock(int cpu)
 {
 	unsigned long long now;
 	unsigned long flags;
@@ -657,6 +952,24 @@ unsigned long long cpu_clock(int cpu)
 
 	return now;
 }
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	unsigned long long prev_cpu_time, time, delta_time;
+
+	prev_cpu_time = per_cpu(prev_cpu_time, cpu);
+	time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
+	delta_time = time-prev_cpu_time;
+
+	if (unlikely(delta_time > time_sync_thresh))
+		time = __sync_cpu_clock(time, cpu);
+
+	return time;
+}
 EXPORT_SYMBOL_GPL(cpu_clock);
 
 #ifndef prepare_arch_switch
@@ -1116,6 +1429,9 @@ static void __resched_task(struct task_struct *p, int tif_bit)
  */
 #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
 
+/*
+ * delta *= weight / lw
+ */
 static unsigned long
 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 		struct load_weight *lw)
@@ -1138,12 +1454,6 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
 }
 
-static inline unsigned long
-calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
-{
-	return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
-}
-
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;
@@ -1241,11 +1551,390 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 #endif
 
+static inline void inc_cpu_load(struct rq *rq, unsigned long load)
+{
+	update_load_add(&rq->load, load);
+}
+
+static inline void dec_cpu_load(struct rq *rq, unsigned long load)
+{
+	update_load_sub(&rq->load, load);
+}
+
 #ifdef CONFIG_SMP
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
 static unsigned long cpu_avg_load_per_task(int cpu);
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+/*
+ * Group load balancing.
+ *
+ * We calculate a few balance domain wide aggregate numbers; load and weight.
+ * Given the pictures below, and assuming each item has equal weight:
+ *
+ *         root          1 - thread
+ *        /  |  \        A - group
+ *       A   1   B
+ *      /|\     / \
+ *     C 2 D   3   4
+ *     |   |
+ *     5   6
+ *
+ * load:
+ *    A and B get 1/3-rd of the total load. C and D get 1/3-rd of A's 1/3-rd,
+ *    which equals 1/9-th of the total load.
+ *
+ * shares:
+ *    The weight of this group on the selected cpus.
+ *
+ * rq_weight:
+ *    Direct sum of all the cpu's their rq weight, e.g. A would get 3 while
+ *    B would get 2.
+ *
+ * task_weight:
+ *    Part of the rq_weight contributed by tasks; all groups except B would
+ *    get 1, B gets 2.
+ */
+
+static inline struct aggregate_struct *
+aggregate(struct task_group *tg, struct sched_domain *sd)
+{
+	return &tg->cfs_rq[sd->first_cpu]->aggregate;
+}
+
+typedef void (*aggregate_func)(struct task_group *, struct sched_domain *);
+
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ */
+static
+void aggregate_walk_tree(aggregate_func down, aggregate_func up,
+			 struct sched_domain *sd)
+{
+	struct task_group *parent, *child;
+
+	rcu_read_lock();
+	parent = &root_task_group;
+down:
+	(*down)(parent, sd);
+	list_for_each_entry_rcu(child, &parent->children, siblings) {
+		parent = child;
+		goto down;
+
+up:
+		continue;
+	}
+	(*up)(parent, sd);
+
+	child = parent;
+	parent = parent->parent;
+	if (parent)
+		goto up;
+	rcu_read_unlock();
+}
+
+/*
+ * Calculate the aggregate runqueue weight.
+ */
+static
+void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
+{
+	unsigned long rq_weight = 0;
+	unsigned long task_weight = 0;
+	int i;
+
+	for_each_cpu_mask(i, sd->span) {
+		rq_weight += tg->cfs_rq[i]->load.weight;
+		task_weight += tg->cfs_rq[i]->task_weight;
+	}
+
+	aggregate(tg, sd)->rq_weight = rq_weight;
+	aggregate(tg, sd)->task_weight = task_weight;
+}
+
+/*
+ * Redistribute tg->shares amongst all tg->cfs_rq[]s.
+ */
+static void __aggregate_redistribute_shares(struct task_group *tg)
+{
+	int i, max_cpu = smp_processor_id();
+	unsigned long rq_weight = 0;
+	unsigned long shares, max_shares = 0, shares_rem = tg->shares;
+
+	for_each_possible_cpu(i)
+		rq_weight += tg->cfs_rq[i]->load.weight;
+
+	for_each_possible_cpu(i) {
+		/*
+		 * divide shares proportional to the rq_weights.
+		 */
+		shares = tg->shares * tg->cfs_rq[i]->load.weight;
+		shares /= rq_weight + 1;
+
+		tg->cfs_rq[i]->shares = shares;
+
+		if (shares > max_shares) {
+			max_shares = shares;
+			max_cpu = i;
+		}
+		shares_rem -= shares;
+	}
+
+	/*
+	 * Ensure it all adds up to tg->shares; we can loose a few
+	 * due to rounding down when computing the per-cpu shares.
+	 */
+	if (shares_rem)
+		tg->cfs_rq[max_cpu]->shares += shares_rem;
+}
+
+/*
+ * Compute the weight of this group on the given cpus.
+ */
+static
+void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
+{
+	unsigned long shares = 0;
+	int i;
+
+again:
+	for_each_cpu_mask(i, sd->span)
+		shares += tg->cfs_rq[i]->shares;
+
+	/*
+	 * When the span doesn't have any shares assigned, but does have
+	 * tasks to run do a machine wide rebalance (should be rare).
+	 */
+	if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) {
+		__aggregate_redistribute_shares(tg);
+		goto again;
+	}
+
+	aggregate(tg, sd)->shares = shares;
+}
+
+/*
+ * Compute the load fraction assigned to this group, relies on the aggregate
+ * weight and this group's parent's load, i.e. top-down.
+ */
+static
+void aggregate_group_load(struct task_group *tg, struct sched_domain *sd)
+{
+	unsigned long load;
+
+	if (!tg->parent) {
+		int i;
+
+		load = 0;
+		for_each_cpu_mask(i, sd->span)
+			load += cpu_rq(i)->load.weight;
+
+	} else {
+		load = aggregate(tg->parent, sd)->load;
+
+		/*
+		 * shares is our weight in the parent's rq so
+		 * shares/parent->rq_weight gives our fraction of the load
+		 */
+		load *= aggregate(tg, sd)->shares;
+		load /= aggregate(tg->parent, sd)->rq_weight + 1;
+	}
+
+	aggregate(tg, sd)->load = load;
+}
+
+static void __set_se_shares(struct sched_entity *se, unsigned long shares);
+
+/*
+ * Calculate and set the cpu's group shares.
+ */
+static void
+__update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd,
+			  int tcpu)
+{
+	int boost = 0;
+	unsigned long shares;
+	unsigned long rq_weight;
+
+	if (!tg->se[tcpu])
+		return;
+
+	rq_weight = tg->cfs_rq[tcpu]->load.weight;
+
+	/*
+	 * If there are currently no tasks on the cpu pretend there is one of
+	 * average load so that when a new task gets to run here it will not
+	 * get delayed by group starvation.
+	 */
+	if (!rq_weight) {
+		boost = 1;
+		rq_weight = NICE_0_LOAD;
+	}
+
+	/*
+	 *           \Sum shares * rq_weight
+	 * shares =  -----------------------
+	 *               \Sum rq_weight
+	 *
+	 */
+	shares = aggregate(tg, sd)->shares * rq_weight;
+	shares /= aggregate(tg, sd)->rq_weight + 1;
+
+	/*
+	 * record the actual number of shares, not the boosted amount.
+	 */
+	tg->cfs_rq[tcpu]->shares = boost ? 0 : shares;
+
+	if (shares < MIN_SHARES)
+		shares = MIN_SHARES;
+
+	__set_se_shares(tg->se[tcpu], shares);
+}
+
+/*
+ * Re-adjust the weights on the cpu the task came from and on the cpu the
+ * task went to.
+ */
+static void
+__move_group_shares(struct task_group *tg, struct sched_domain *sd,
+		    int scpu, int dcpu)
+{
+	unsigned long shares;
+
+	shares = tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
+
+	__update_group_shares_cpu(tg, sd, scpu);
+	__update_group_shares_cpu(tg, sd, dcpu);
+
+	/*
+	 * ensure we never loose shares due to rounding errors in the
+	 * above redistribution.
+	 */
+	shares -= tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
+	if (shares)
+		tg->cfs_rq[dcpu]->shares += shares;
+}
+
+/*
+ * Because changing a group's shares changes the weight of the super-group
+ * we need to walk up the tree and change all shares until we hit the root.
+ */
+static void
+move_group_shares(struct task_group *tg, struct sched_domain *sd,
+		  int scpu, int dcpu)
+{
+	while (tg) {
+		__move_group_shares(tg, sd, scpu, dcpu);
+		tg = tg->parent;
+	}
+}
+
+static
+void aggregate_group_set_shares(struct task_group *tg, struct sched_domain *sd)
+{
+	unsigned long shares = aggregate(tg, sd)->shares;
+	int i;
+
+	for_each_cpu_mask(i, sd->span) {
+		struct rq *rq = cpu_rq(i);
+		unsigned long flags;
+
+		spin_lock_irqsave(&rq->lock, flags);
+		__update_group_shares_cpu(tg, sd, i);
+		spin_unlock_irqrestore(&rq->lock, flags);
+	}
+
+	aggregate_group_shares(tg, sd);
+
+	/*
+	 * ensure we never loose shares due to rounding errors in the
+	 * above redistribution.
+	 */
+	shares -= aggregate(tg, sd)->shares;
+	if (shares) {
+		tg->cfs_rq[sd->first_cpu]->shares += shares;
+		aggregate(tg, sd)->shares += shares;
+	}
+}
+
+/*
+ * Calculate the accumulative weight and recursive load of each task group
+ * while walking down the tree.
+ */
+static
+void aggregate_get_down(struct task_group *tg, struct sched_domain *sd)
+{
+	aggregate_group_weight(tg, sd);
+	aggregate_group_shares(tg, sd);
+	aggregate_group_load(tg, sd);
+}
+
+/*
+ * Rebalance the cpu shares while walking back up the tree.
+ */
+static
+void aggregate_get_up(struct task_group *tg, struct sched_domain *sd)
+{
+	aggregate_group_set_shares(tg, sd);
+}
+
+static DEFINE_PER_CPU(spinlock_t, aggregate_lock);
+
+static void __init init_aggregate(void)
+{
+	int i;
+
+	for_each_possible_cpu(i)
+		spin_lock_init(&per_cpu(aggregate_lock, i));
+}
+
+static int get_aggregate(struct sched_domain *sd)
+{
+	if (!spin_trylock(&per_cpu(aggregate_lock, sd->first_cpu)))
+		return 0;
+
+	aggregate_walk_tree(aggregate_get_down, aggregate_get_up, sd);
+	return 1;
+}
+
+static void put_aggregate(struct sched_domain *sd)
+{
+	spin_unlock(&per_cpu(aggregate_lock, sd->first_cpu));
+}
+
+static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
+{
+	cfs_rq->shares = shares;
+}
+
+#else
+
+static inline void init_aggregate(void)
+{
+}
+
+static inline int get_aggregate(struct sched_domain *sd)
+{
+	return 0;
+}
+
+static inline void put_aggregate(struct sched_domain *sd)
+{
+}
+#endif
+
+#else /* CONFIG_SMP */
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
+{
+}
+#endif
+
 #endif /* CONFIG_SMP */
 
 #include "sched_stats.h"
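A short worked example of the per-cpu share calculation in __update_group_shares_cpu() above, using made-up numbers: take a group with aggregate shares of 1024 across a two-cpu span whose cfs_rq weights are 3072 and 1024 (aggregate rq_weight 4096). The first cpu gets 1024 * 3072 / (4096 + 1) = 767 shares and the second gets 1024 * 1024 / (4096 + 1) = 255, i.e. 1022 in total; the two shares lost to the +1 bias and integer truncation are exactly the rounding loss that __move_group_shares() and aggregate_group_set_shares() hand back to one of the runqueues afterwards.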
@@ -1258,26 +1947,14 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 #define sched_class_highest (&rt_sched_class)
 
-static inline void inc_load(struct rq *rq, const struct task_struct *p)
-{
-	update_load_add(&rq->load, p->se.load.weight);
-}
-
-static inline void dec_load(struct rq *rq, const struct task_struct *p)
-{
-	update_load_sub(&rq->load, p->se.load.weight);
-}
-
-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
 {
 	rq->nr_running++;
-	inc_load(rq, p);
 }
 
-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
 {
 	rq->nr_running--;
-	dec_load(rq, p);
 }
 
 static void set_load_weight(struct task_struct *p)
@@ -1369,7 +2046,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
-	inc_nr_running(p, rq);
+	inc_nr_running(rq);
 }
 
 /*
@@ -1381,7 +2058,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
-	dec_nr_running(p, rq);
+	dec_nr_running(rq);
 }
 
 /**
@@ -1438,7 +2115,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (&p->se == cfs_rq_of(&p->se)->next)
+	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
@@ -1728,17 +2405,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  * find_idlest_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
+		cpumask_t *tmp)
 {
-	cpumask_t tmp;
 	unsigned long load, min_load = ULONG_MAX;
 	int idlest = -1;
 	int i;
 
 	/* Traverse only the allowed CPUs */
-	cpus_and(tmp, group->cpumask, p->cpus_allowed);
+	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask(i, tmp) {
+	for_each_cpu_mask(i, *tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -1777,7 +2454,7 @@ static int sched_balance_self(int cpu, int flag)
 	}
 
 	while (sd) {
-		cpumask_t span;
+		cpumask_t span, tmpmask;
 		struct sched_group *group;
 		int new_cpu, weight;
 
@@ -1793,7 +2470,7 @@ static int sched_balance_self(int cpu, int flag)
 			continue;
 		}
 
-		new_cpu = find_idlest_cpu(group, t, cpu);
+		new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
 		if (new_cpu == -1 || new_cpu == cpu) {
 			/* Now try balancing at a lower domain level of cpu */
 			sd = sd->child;
@@ -1839,6 +2516,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	long old_state;
 	struct rq *rq;
 
+	if (!sched_feat(SYNC_WAKEUPS))
+		sync = 0;
+
 	smp_wmb();
 	rq = task_rq_lock(p, &flags);
 	old_state = p->state;
@@ -1955,6 +2635,7 @@ static void __sched_fork(struct task_struct *p)
 
 	INIT_LIST_HEAD(&p->rt.run_list);
 	p->se.on_rq = 0;
+	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -2030,7 +2711,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		 * management (if any):
 		 */
 		p->sched_class->task_new(rq, p);
-		inc_nr_running(p, rq);
+		inc_nr_running(rq);
 	}
 	check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
@@ -2674,7 +3355,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
 		   unsigned long *imbalance, enum cpu_idle_type idle,
-		   int *sd_idle, cpumask_t *cpus, int *balance)
+		   int *sd_idle, const cpumask_t *cpus, int *balance)
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -2975,7 +3656,7 @@ ret:
  */
 static struct rq *
 find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
-		   unsigned long imbalance, cpumask_t *cpus)
+		   unsigned long imbalance, const cpumask_t *cpus)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -3014,14 +3695,18 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *balance)
+			int *balance, cpumask_t *cpus)
 {
 	int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
 	struct sched_group *group;
 	unsigned long imbalance;
 	struct rq *busiest;
-	cpumask_t cpus = CPU_MASK_ALL;
 	unsigned long flags;
+	int unlock_aggregate;
+
+	cpus_setall(*cpus);
+
+	unlock_aggregate = get_aggregate(sd);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -3037,7 +3722,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
-				   &cpus, balance);
+				   cpus, balance);
 
 	if (*balance == 0)
 		goto out_balanced;
@@ -3047,7 +3732,7 @@ redo:
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(group, idle, imbalance, &cpus);
+	busiest = find_busiest_queue(group, idle, imbalance, cpus);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
@@ -3080,8 +3765,8 @@ redo:
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
 		if (unlikely(all_pinned)) {
-			cpu_clear(cpu_of(busiest), cpus);
-			if (!cpus_empty(cpus))
+			cpu_clear(cpu_of(busiest), *cpus);
+			if (!cpus_empty(*cpus))
 				goto redo;
 			goto out_balanced;
 		}
@@ -3138,8 +3823,9 @@ redo:
 
 	if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-		return -1;
-	return ld_moved;
+		ld_moved = -1;
+
+	goto out;
 
 out_balanced:
 	schedstat_inc(sd, lb_balanced[idle]);
@@ -3154,8 +3840,13 @@ out_one_pinned:
 
 	if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-		return -1;
-	return 0;
+		ld_moved = -1;
+	else
+		ld_moved = 0;
+out:
+	if (unlock_aggregate)
+		put_aggregate(sd);
+	return ld_moved;
 }
 
 /*
@@ -3166,7 +3857,8 @@ out_one_pinned:
  * this_rq is locked.
  */
 static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
+			cpumask_t *cpus)
 {
 	struct sched_group *group;
 	struct rq *busiest = NULL;
@@ -3174,7 +3866,8 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 	int ld_moved = 0;
 	int sd_idle = 0;
 	int all_pinned = 0;
-	cpumask_t cpus = CPU_MASK_ALL;
+
+	cpus_setall(*cpus);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -3189,14 +3882,13 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
-				   &sd_idle, &cpus, NULL);
+				   &sd_idle, cpus, NULL);
 	if (!group) {
 		schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance,
-				     &cpus);
+	busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
 		goto out_balanced;
@@ -3218,8 +3910,8 @@ redo:
 		spin_unlock(&busiest->lock);
 
 		if (unlikely(all_pinned)) {
-			cpu_clear(cpu_of(busiest), cpus);
-			if (!cpus_empty(cpus))
+			cpu_clear(cpu_of(busiest), *cpus);
+			if (!cpus_empty(*cpus))
 				goto redo;
 		}
 	}
@@ -3253,6 +3945,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	struct sched_domain *sd;
 	int pulled_task = -1;
 	unsigned long next_balance = jiffies + HZ;
+	cpumask_t tmpmask;
 
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
@@ -3262,8 +3955,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 
 		if (sd->flags & SD_BALANCE_NEWIDLE)
 			/* If we've pulled tasks over stop searching: */
-			pulled_task = load_balance_newidle(this_cpu,
-							   this_rq, sd);
+			pulled_task = load_balance_newidle(this_cpu, this_rq,
+							   sd, &tmpmask);
 
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
@@ -3422,6 +4115,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	/* Earliest time when we have to do rebalance again */
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
+	cpumask_t tmp;
 
 	for_each_domain(cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -3445,7 +4139,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		}
 
 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
-			if (load_balance(cpu, rq, sd, idle, &balance)) {
+			if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) {
 				/*
 				 * We've pulled tasks over so either we're no
 				 * longer idle, or one of our SMT siblings is
@@ -3561,7 +4255,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 		 */
 		int ilb = first_cpu(nohz.cpu_mask);
 
-		if (ilb != NR_CPUS)
+		if (ilb < nr_cpu_ids)
 			resched_cpu(ilb);
 	}
 }
@@ -3765,9 +4459,9 @@ void scheduler_tick(void)
 		rq->clock_underflows++;
 	}
 	rq->tick_timestamp = rq->clock;
+	update_last_tick_seen(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
-	update_sched_rt_period(rq);
 	spin_unlock(&rq->lock);
 
 #ifdef CONFIG_SMP
@@ -4367,10 +5061,8 @@ void set_user_nice(struct task_struct *p, long nice)
 		goto out_unlock;
 	}
 	on_rq = p->se.on_rq;
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, p, 0);
-		dec_load(rq, p);
-	}
 
 	p->static_prio = NICE_TO_PRIO(nice);
 	set_load_weight(p);
@@ -4380,7 +5072,6 @@ void set_user_nice(struct task_struct *p, long nice)
 
 	if (on_rq) {
 		enqueue_task(rq, p, 0);
-		inc_load(rq, p);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -4602,7 +5293,7 @@ recheck:
 	 * Do not allow realtime tasks into groups that have no runtime
 	 * assigned.
 	 */
-	if (rt_policy(policy) && task_group(p)->rt_runtime == 0)
+	if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
 		return -EPERM;
 #endif
 
@@ -4764,9 +5455,10 @@ out_unlock:
 	return retval;
 }
 
-long sched_setaffinity(pid_t pid, cpumask_t new_mask)
+long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 {
 	cpumask_t cpus_allowed;
+	cpumask_t new_mask = *in_mask;
 	struct task_struct *p;
 	int retval;
 
@@ -4797,13 +5489,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	if (retval)
 		goto out_unlock;
 
-	cpus_allowed = cpuset_cpus_allowed(p);
+	cpuset_cpus_allowed(p, &cpus_allowed);
 	cpus_and(new_mask, new_mask, cpus_allowed);
 again:
-	retval = set_cpus_allowed(p, new_mask);
+	retval = set_cpus_allowed_ptr(p, &new_mask);
 
 	if (!retval) {
-		cpus_allowed = cpuset_cpus_allowed(p);
+		cpuset_cpus_allowed(p, &cpus_allowed);
 		if (!cpus_subset(new_mask, cpus_allowed)) {
 			/*
 			 * We must have raced with a concurrent cpuset
@@ -4847,7 +5539,7 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
 	if (retval)
 		return retval;
 
-	return sched_setaffinity(pid, new_mask);
+	return sched_setaffinity(pid, &new_mask);
 }
 
 /*
@@ -5309,7 +6001,6 @@ static inline void sched_init_granularity(void)
 		sysctl_sched_latency = limit;
 
 	sysctl_sched_wakeup_granularity *= factor;
-	sysctl_sched_batch_wakeup_granularity *= factor;
 }
 
 #ifdef CONFIG_SMP
@@ -5338,7 +6029,7 @@ static inline void sched_init_granularity(void)
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
 {
 	struct migration_req req;
 	unsigned long flags;
@@ -5346,23 +6037,23 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	int ret = 0;
 
 	rq = task_rq_lock(p, &flags);
-	if (!cpus_intersects(new_mask, cpu_online_map)) {
+	if (!cpus_intersects(*new_mask, cpu_online_map)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, &new_mask);
+		p->sched_class->set_cpus_allowed(p, new_mask);
 	else {
-		p->cpus_allowed = new_mask;
-		p->rt.nr_cpus_allowed = cpus_weight(new_mask);
+		p->cpus_allowed = *new_mask;
+		p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
 	}
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpu_isset(task_cpu(p), new_mask))
+	if (cpu_isset(task_cpu(p), *new_mask))
 		goto out;
 
-	if (migrate_task(p, any_online_cpu(new_mask), &req)) {
+	if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, &flags);
 		wake_up_process(rq->migration_thread);
@@ -5375,7 +6066,7 @@ out:
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(set_cpus_allowed);
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 /*
  * Move (not current) task off this cpu, onto dest cpu. We're doing
@@ -5513,12 +6204,14 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 		dest_cpu = any_online_cpu(mask);
 
 		/* On any allowed CPU? */
-		if (dest_cpu == NR_CPUS)
+		if (dest_cpu >= nr_cpu_ids)
 			dest_cpu = any_online_cpu(p->cpus_allowed);
 
 		/* No more Mr. Nice Guy. */
-		if (dest_cpu == NR_CPUS) {
-			cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
+		if (dest_cpu >= nr_cpu_ids) {
+			cpumask_t cpus_allowed;
+
+			cpuset_cpus_allowed_locked(p, &cpus_allowed);
 			/*
 			 * Try to stay on the same cpuset, where the
 			 * current cpuset may be a subset of all cpus.
@@ -5554,7 +6247,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
  */
 static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-	struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
+	struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR));
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -5966,20 +6659,16 @@ void __init migration_init(void)
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
#ifdef CONFIG_SMP
|
|
|
|
|
|
-/* Number of possible processor ids */
|
|
|
|
-int nr_cpu_ids __read_mostly = NR_CPUS;
|
|
|
|
-EXPORT_SYMBOL(nr_cpu_ids);
|
|
|
|
-
|
|
|
|
#ifdef CONFIG_SCHED_DEBUG
|
|
#ifdef CONFIG_SCHED_DEBUG
|
|
|
|
|
|
-static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
|
|
|
|
|
|
+static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
|
|
|
|
+ cpumask_t *groupmask)
|
|
{
|
|
{
|
|
struct sched_group *group = sd->groups;
|
|
struct sched_group *group = sd->groups;
|
|
- cpumask_t groupmask;
|
|
|
|
- char str[NR_CPUS];
|
|
|
|
|
|
+ char str[256];
|
|
|
|
|
|
- cpumask_scnprintf(str, NR_CPUS, sd->span);
|
|
|
|
- cpus_clear(groupmask);
|
|
|
|
|
|
+ cpulist_scnprintf(str, sizeof(str), sd->span);
|
|
|
|
+ cpus_clear(*groupmask);
|
|
|
|
|
|
printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
|
|
printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
|
|
|
|
|
|
@@ -6023,25 +6712,25 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
|
|
break;
|
|
break;
|
|
}
|
|
}
|
|
|
|
|
|
- if (cpus_intersects(groupmask, group->cpumask)) {
|
|
|
|
|
|
+ if (cpus_intersects(*groupmask, group->cpumask)) {
|
|
printk(KERN_CONT "\n");
|
|
printk(KERN_CONT "\n");
|
|
printk(KERN_ERR "ERROR: repeated CPUs\n");
|
|
printk(KERN_ERR "ERROR: repeated CPUs\n");
|
|
break;
|
|
break;
|
|
}
|
|
}
|
|
|
|
|
|
- cpus_or(groupmask, groupmask, group->cpumask);
|
|
|
|
|
|
+ cpus_or(*groupmask, *groupmask, group->cpumask);
|
|
|
|
|
|
- cpumask_scnprintf(str, NR_CPUS, group->cpumask);
|
|
|
|
|
|
+ cpulist_scnprintf(str, sizeof(str), group->cpumask);
|
|
printk(KERN_CONT " %s", str);
|
|
printk(KERN_CONT " %s", str);
|
|
|
|
|
|
group = group->next;
|
|
group = group->next;
|
|
} while (group != sd->groups);
|
|
} while (group != sd->groups);
|
|
printk(KERN_CONT "\n");
|
|
printk(KERN_CONT "\n");
|
|
|
|
|
|
- if (!cpus_equal(sd->span, groupmask))
|
|
|
|
|
|
+ if (!cpus_equal(sd->span, *groupmask))
|
|
printk(KERN_ERR "ERROR: groups don't span domain->span\n");
|
|
printk(KERN_ERR "ERROR: groups don't span domain->span\n");
|
|
|
|
|
|
- if (sd->parent && !cpus_subset(groupmask, sd->parent->span))
|
|
|
|
|
|
+ if (sd->parent && !cpus_subset(*groupmask, sd->parent->span))
|
|
printk(KERN_ERR "ERROR: parent span is not a superset "
|
|
printk(KERN_ERR "ERROR: parent span is not a superset "
|
|
"of domain->span\n");
|
|
"of domain->span\n");
|
|
return 0;
|
|
return 0;
|
|
@@ -6049,6 +6738,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
|
|
|
|
|
|
static void sched_domain_debug(struct sched_domain *sd, int cpu)
|
|
static void sched_domain_debug(struct sched_domain *sd, int cpu)
|
|
{
|
|
{
|
|
|
|
+ cpumask_t *groupmask;
|
|
int level = 0;
|
|
int level = 0;
|
|
|
|
|
|
if (!sd) {
|
|
if (!sd) {
|
|
@@ -6058,14 +6748,21 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
|
|
|
|
|
|
printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
|
|
printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
|
|
|
|
|
|
|
|
+ groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
|
|
|
|
+ if (!groupmask) {
|
|
|
|
+ printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
for (;;) {
|
|
for (;;) {
|
|
- if (sched_domain_debug_one(sd, cpu, level))
|
|
|
|
|
|
+ if (sched_domain_debug_one(sd, cpu, level, groupmask))
|
|
break;
|
|
break;
|
|
level++;
|
|
level++;
|
|
sd = sd->parent;
|
|
sd = sd->parent;
|
|
if (!sd)
|
|
if (!sd)
|
|
break;
|
|
break;
|
|
}
|
|
}
|
|
|
|
+ kfree(groupmask);
|
|
}
|
|
}
|
|
#else
|
|
#else
|
|
# define sched_domain_debug(sd, cpu) do { } while (0)
|
|
# define sched_domain_debug(sd, cpu) do { } while (0)
|
|
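The two hunks above follow one pattern: NR_CPUS-sized masks are evicted from helper stack frames, and the outermost function allocates a single scratch mask that is threaded down by pointer. A generic sketch of that shape (the function names here are illustrative, not the patch's):

        static int debug_one_level(struct sched_domain *sd, cpumask_t *scratch)
        {
                cpus_clear(*scratch);           /* reuse the caller's storage */
                /* ... accumulate and print into *scratch ... */
                return 0;
        }

        static void debug_all_levels(struct sched_domain *sd)
        {
                cpumask_t *scratch = kmalloc(sizeof(cpumask_t), GFP_KERNEL);

                if (!scratch)
                        return;                 /* degrade gracefully, as the patch does */
                for (; sd; sd = sd->parent)
                        if (debug_one_level(sd, scratch))
                                break;
                kfree(scratch);
        }

The char str[256] plus cpulist_scnprintf() change serves the same goal: a fixed, small buffer holding a range-style listing instead of an NR_CPUS-wide bitmap string.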
@@ -6253,30 +6950,33 @@ __setup("isolcpus=", isolated_cpu_setup);
|
|
* and ->cpu_power to 0.
|
|
* and ->cpu_power to 0.
|
|
*/
|
|
*/
|
|
static void
|
|
static void
|
|
-init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
|
|
|
|
|
|
+init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
|
|
int (*group_fn)(int cpu, const cpumask_t *cpu_map,
|
|
int (*group_fn)(int cpu, const cpumask_t *cpu_map,
|
|
- struct sched_group **sg))
|
|
|
|
|
|
+ struct sched_group **sg,
|
|
|
|
+ cpumask_t *tmpmask),
|
|
|
|
+ cpumask_t *covered, cpumask_t *tmpmask)
|
|
{
|
|
{
|
|
struct sched_group *first = NULL, *last = NULL;
|
|
struct sched_group *first = NULL, *last = NULL;
|
|
- cpumask_t covered = CPU_MASK_NONE;
|
|
|
|
int i;
|
|
int i;
|
|
|
|
|
|
- for_each_cpu_mask(i, span) {
|
|
|
|
|
|
+ cpus_clear(*covered);
|
|
|
|
+
|
|
|
|
+ for_each_cpu_mask(i, *span) {
|
|
struct sched_group *sg;
|
|
struct sched_group *sg;
|
|
- int group = group_fn(i, cpu_map, &sg);
|
|
|
|
|
|
+ int group = group_fn(i, cpu_map, &sg, tmpmask);
|
|
int j;
|
|
int j;
|
|
|
|
|
|
- if (cpu_isset(i, covered))
|
|
|
|
|
|
+ if (cpu_isset(i, *covered))
|
|
continue;
|
|
continue;
|
|
|
|
|
|
- sg->cpumask = CPU_MASK_NONE;
|
|
|
|
|
|
+ cpus_clear(sg->cpumask);
|
|
sg->__cpu_power = 0;
|
|
sg->__cpu_power = 0;
|
|
|
|
|
|
- for_each_cpu_mask(j, span) {
|
|
|
|
- if (group_fn(j, cpu_map, NULL) != group)
|
|
|
|
|
|
+ for_each_cpu_mask(j, *span) {
|
|
|
|
+ if (group_fn(j, cpu_map, NULL, tmpmask) != group)
|
|
continue;
|
|
continue;
|
|
|
|
|
|
- cpu_set(j, covered);
|
|
|
|
|
|
+ cpu_set(j, *covered);
|
|
cpu_set(j, sg->cpumask);
|
|
cpu_set(j, sg->cpumask);
|
|
}
|
|
}
|
|
if (!first)
|
|
if (!first)
|
|
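Because init_sched_build_groups() now forwards a caller-provided tmpmask to its group_fn callback, every callback has to accept (and may ignore) that extra mask and must not build a cpumask_t on its own stack. A hypothetical callback matching the new signature, loosely modelled on the cpu_to_*_group() helpers elsewhere in this patch (the name and the mask chosen here are illustrative):

        static int example_group_fn(int cpu, const cpumask_t *cpu_map,
                                    struct sched_group **sg, cpumask_t *tmpmask)
        {
                *tmpmask = per_cpu(cpu_sibling_map, cpu);       /* scratch, not a local */
                cpus_and(*tmpmask, *tmpmask, *cpu_map);
                if (sg)
                        *sg = &per_cpu(sched_group_cpus, first_cpu(*tmpmask));
                return first_cpu(*tmpmask);
        }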
@@ -6302,7 +7002,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
|
|
*
|
|
*
|
|
* Should use nodemask_t.
|
|
* Should use nodemask_t.
|
|
*/
|
|
*/
|
|
-static int find_next_best_node(int node, unsigned long *used_nodes)
|
|
|
|
|
|
+static int find_next_best_node(int node, nodemask_t *used_nodes)
|
|
{
|
|
{
|
|
int i, n, val, min_val, best_node = 0;
|
|
int i, n, val, min_val, best_node = 0;
|
|
|
|
|
|
@@ -6316,7 +7016,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
|
|
continue;
|
|
continue;
|
|
|
|
|
|
/* Skip already used nodes */
|
|
/* Skip already used nodes */
|
|
- if (test_bit(n, used_nodes))
|
|
|
|
|
|
+ if (node_isset(n, *used_nodes))
|
|
continue;
|
|
continue;
|
|
|
|
|
|
/* Simple min distance search */
|
|
/* Simple min distance search */
|
|
@@ -6328,40 +7028,36 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
- set_bit(best_node, used_nodes);
|
|
|
|
|
|
+ node_set(best_node, *used_nodes);
|
|
return best_node;
|
|
return best_node;
|
|
}
|
|
}
|
|
|
|
|
|
/**
|
|
/**
|
|
* sched_domain_node_span - get a cpumask for a node's sched_domain
|
|
* sched_domain_node_span - get a cpumask for a node's sched_domain
|
|
* @node: node whose cpumask we're constructing
|
|
* @node: node whose cpumask we're constructing
|
|
- * @size: number of nodes to include in this span
|
|
|
|
*
|
|
*
|
|
* Given a node, construct a good cpumask for its sched_domain to span. It
|
|
* Given a node, construct a good cpumask for its sched_domain to span. It
|
|
* should be one that prevents unnecessary balancing, but also spreads tasks
|
|
* should be one that prevents unnecessary balancing, but also spreads tasks
|
|
* out optimally.
|
|
* out optimally.
|
|
*/
|
|
*/
|
|
-static cpumask_t sched_domain_node_span(int node)
|
|
|
|
|
|
+static void sched_domain_node_span(int node, cpumask_t *span)
|
|
{
|
|
{
|
|
- DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
|
|
|
|
- cpumask_t span, nodemask;
|
|
|
|
|
|
+ nodemask_t used_nodes;
|
|
|
|
+ node_to_cpumask_ptr(nodemask, node);
|
|
int i;
|
|
int i;
|
|
|
|
|
|
- cpus_clear(span);
|
|
|
|
- bitmap_zero(used_nodes, MAX_NUMNODES);
|
|
|
|
|
|
+ cpus_clear(*span);
|
|
|
|
+ nodes_clear(used_nodes);
|
|
|
|
|
|
- nodemask = node_to_cpumask(node);
|
|
|
|
- cpus_or(span, span, nodemask);
|
|
|
|
- set_bit(node, used_nodes);
|
|
|
|
|
|
+ cpus_or(*span, *span, *nodemask);
|
|
|
|
+ node_set(node, used_nodes);
|
|
|
|
|
|
for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
|
|
for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
|
|
- int next_node = find_next_best_node(node, used_nodes);
|
|
|
|
|
|
+ int next_node = find_next_best_node(node, &used_nodes);
|
|
|
|
|
|
- nodemask = node_to_cpumask(next_node);
|
|
|
|
- cpus_or(span, span, nodemask);
|
|
|
|
|
|
+ node_to_cpumask_ptr_next(nodemask, next_node);
|
|
|
|
+ cpus_or(*span, *span, *nodemask);
|
|
}
|
|
}
|
|
-
|
|
|
|
- return span;
|
|
|
|
}
|
|
}
|
|
#endif
|
|
#endif
|
|
|
|
|
|
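Two independent cleanups sit in the hunks above: used_nodes becomes a nodemask_t, so node_isset()/node_set() replace raw test_bit()/set_bit(), and sched_domain_node_span() switches from returning a cpumask_t by value to filling storage the caller already owns. The call shape later in the patch is simply:

        sched_domain_node_span(cpu_to_node(i), &sd->span);      /* fills sd->span, returns nothing */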
@@ -6375,7 +7071,8 @@ static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
|
|
static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
|
|
static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
|
|
|
|
|
|
static int
|
|
static int
|
|
-cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
|
|
|
|
|
|
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
|
|
|
|
+ cpumask_t *unused)
|
|
{
|
|
{
|
|
if (sg)
|
|
if (sg)
|
|
*sg = &per_cpu(sched_group_cpus, cpu);
|
|
*sg = &per_cpu(sched_group_cpus, cpu);
|
|
@@ -6393,19 +7090,22 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
|
|
|
|
|
|
#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
|
|
#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
|
|
static int
|
|
static int
|
|
-cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
|
|
|
|
|
|
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
|
|
|
|
+ cpumask_t *mask)
|
|
{
|
|
{
|
|
int group;
|
|
int group;
|
|
- cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
|
|
|
|
- cpus_and(mask, mask, *cpu_map);
|
|
|
|
- group = first_cpu(mask);
|
|
|
|
|
|
+
|
|
|
|
+ *mask = per_cpu(cpu_sibling_map, cpu);
|
|
|
|
+ cpus_and(*mask, *mask, *cpu_map);
|
|
|
|
+ group = first_cpu(*mask);
|
|
if (sg)
|
|
if (sg)
|
|
*sg = &per_cpu(sched_group_core, group);
|
|
*sg = &per_cpu(sched_group_core, group);
|
|
return group;
|
|
return group;
|
|
}
|
|
}
|
|
#elif defined(CONFIG_SCHED_MC)
|
|
#elif defined(CONFIG_SCHED_MC)
|
|
static int
|
|
static int
|
|
-cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
|
|
|
|
|
|
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
|
|
|
|
+ cpumask_t *unused)
|
|
{
|
|
{
|
|
if (sg)
|
|
if (sg)
|
|
*sg = &per_cpu(sched_group_core, cpu);
|
|
*sg = &per_cpu(sched_group_core, cpu);
|
|
@@ -6417,17 +7117,18 @@ static DEFINE_PER_CPU(struct sched_domain, phys_domains);
|
|
static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
|
|
static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
|
|
|
|
|
|
static int
|
|
static int
|
|
-cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
|
|
|
|
|
|
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
|
|
|
|
+ cpumask_t *mask)
|
|
{
|
|
{
|
|
int group;
|
|
int group;
|
|
#ifdef CONFIG_SCHED_MC
|
|
#ifdef CONFIG_SCHED_MC
|
|
- cpumask_t mask = cpu_coregroup_map(cpu);
|
|
|
|
- cpus_and(mask, mask, *cpu_map);
|
|
|
|
- group = first_cpu(mask);
|
|
|
|
|
|
+ *mask = cpu_coregroup_map(cpu);
|
|
|
|
+ cpus_and(*mask, *mask, *cpu_map);
|
|
|
|
+ group = first_cpu(*mask);
|
|
#elif defined(CONFIG_SCHED_SMT)
|
|
#elif defined(CONFIG_SCHED_SMT)
|
|
- cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
|
|
|
|
- cpus_and(mask, mask, *cpu_map);
|
|
|
|
- group = first_cpu(mask);
|
|
|
|
|
|
+ *mask = per_cpu(cpu_sibling_map, cpu);
|
|
|
|
+ cpus_and(*mask, *mask, *cpu_map);
|
|
|
|
+ group = first_cpu(*mask);
|
|
#else
|
|
#else
|
|
group = cpu;
|
|
group = cpu;
|
|
#endif
|
|
#endif
|
|
@@ -6443,19 +7144,19 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
|
|
* gets dynamically allocated.
|
|
* gets dynamically allocated.
|
|
*/
|
|
*/
|
|
static DEFINE_PER_CPU(struct sched_domain, node_domains);
|
|
static DEFINE_PER_CPU(struct sched_domain, node_domains);
|
|
-static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
|
|
|
|
|
|
+static struct sched_group ***sched_group_nodes_bycpu;
|
|
|
|
|
|
static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
|
|
static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
|
|
static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
|
|
static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
|
|
|
|
|
|
static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
|
|
static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
|
|
- struct sched_group **sg)
|
|
|
|
|
|
+ struct sched_group **sg, cpumask_t *nodemask)
|
|
{
|
|
{
|
|
- cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
|
|
|
|
int group;
|
|
int group;
|
|
|
|
|
|
- cpus_and(nodemask, nodemask, *cpu_map);
|
|
|
|
- group = first_cpu(nodemask);
|
|
|
|
|
|
+ *nodemask = node_to_cpumask(cpu_to_node(cpu));
|
|
|
|
+ cpus_and(*nodemask, *nodemask, *cpu_map);
|
|
|
|
+ group = first_cpu(*nodemask);
|
|
|
|
|
|
if (sg)
|
|
if (sg)
|
|
*sg = &per_cpu(sched_group_allnodes, group);
|
|
*sg = &per_cpu(sched_group_allnodes, group);
|
|
@@ -6491,7 +7192,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
|
|
|
|
|
|
#ifdef CONFIG_NUMA
|
|
#ifdef CONFIG_NUMA
|
|
/* Free memory allocated for various sched_group structures */
|
|
/* Free memory allocated for various sched_group structures */
|
|
-static void free_sched_groups(const cpumask_t *cpu_map)
|
|
|
|
|
|
+static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
|
|
{
|
|
{
|
|
int cpu, i;
|
|
int cpu, i;
|
|
|
|
|
|
@@ -6503,11 +7204,11 @@ static void free_sched_groups(const cpumask_t *cpu_map)
|
|
continue;
|
|
continue;
|
|
|
|
|
|
for (i = 0; i < MAX_NUMNODES; i++) {
|
|
for (i = 0; i < MAX_NUMNODES; i++) {
|
|
- cpumask_t nodemask = node_to_cpumask(i);
|
|
|
|
struct sched_group *oldsg, *sg = sched_group_nodes[i];
|
|
struct sched_group *oldsg, *sg = sched_group_nodes[i];
|
|
|
|
|
|
- cpus_and(nodemask, nodemask, *cpu_map);
|
|
|
|
- if (cpus_empty(nodemask))
|
|
|
|
|
|
+ *nodemask = node_to_cpumask(i);
|
|
|
|
+ cpus_and(*nodemask, *nodemask, *cpu_map);
|
|
|
|
+ if (cpus_empty(*nodemask))
|
|
continue;
|
|
continue;
|
|
|
|
|
|
if (sg == NULL)
|
|
if (sg == NULL)
|
|
@@ -6525,7 +7226,7 @@ next_sg:
|
|
}
|
|
}
|
|
}
|
|
}
|
|
#else
|
|
#else
|
|
-static void free_sched_groups(const cpumask_t *cpu_map)
|
|
|
|
|
|
+static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
|
|
{
|
|
{
|
|
}
|
|
}
|
|
#endif
|
|
#endif
|
|
@@ -6572,24 +7273,117 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
|
|
return;
|
|
return;
|
|
}
|
|
}
|
|
|
|
|
|
- /*
|
|
|
|
- * add cpu_power of each child group to this groups cpu_power
|
|
|
|
- */
|
|
|
|
- group = child->groups;
|
|
|
|
- do {
|
|
|
|
- sg_inc_cpu_power(sd->groups, group->__cpu_power);
|
|
|
|
- group = group->next;
|
|
|
|
- } while (group != child->groups);
|
|
|
|
|
|
+ /*
|
|
|
|
+ * add cpu_power of each child group to this groups cpu_power
|
|
|
|
+ */
|
|
|
|
+ group = child->groups;
|
|
|
|
+ do {
|
|
|
|
+ sg_inc_cpu_power(sd->groups, group->__cpu_power);
|
|
|
|
+ group = group->next;
|
|
|
|
+ } while (group != child->groups);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * Initializers for schedule domains
|
|
|
|
+ * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
|
|
|
|
+ */
|
|
|
|
+
|
|
|
|
+#define SD_INIT(sd, type) sd_init_##type(sd)
|
|
|
|
+#define SD_INIT_FUNC(type) \
|
|
|
|
+static noinline void sd_init_##type(struct sched_domain *sd) \
|
|
|
|
+{ \
|
|
|
|
+ memset(sd, 0, sizeof(*sd)); \
|
|
|
|
+ *sd = SD_##type##_INIT; \
|
|
|
|
+ sd->level = SD_LV_##type; \
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+SD_INIT_FUNC(CPU)
|
|
|
|
+#ifdef CONFIG_NUMA
|
|
|
|
+ SD_INIT_FUNC(ALLNODES)
|
|
|
|
+ SD_INIT_FUNC(NODE)
|
|
|
|
+#endif
|
|
|
|
+#ifdef CONFIG_SCHED_SMT
|
|
|
|
+ SD_INIT_FUNC(SIBLING)
|
|
|
|
+#endif
|
|
|
|
+#ifdef CONFIG_SCHED_MC
|
|
|
|
+ SD_INIT_FUNC(MC)
|
|
|
|
+#endif
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * To minimize stack usage kmalloc room for cpumasks and share the
|
|
|
|
+ * space as the usage in build_sched_domains() dictates. Used only
|
|
|
|
+ * if the amount of space is significant.
|
|
|
|
+ */
|
|
|
|
+struct allmasks {
|
|
|
|
+ cpumask_t tmpmask; /* make this one first */
|
|
|
|
+ union {
|
|
|
|
+ cpumask_t nodemask;
|
|
|
|
+ cpumask_t this_sibling_map;
|
|
|
|
+ cpumask_t this_core_map;
|
|
|
|
+ };
|
|
|
|
+ cpumask_t send_covered;
|
|
|
|
+
|
|
|
|
+#ifdef CONFIG_NUMA
|
|
|
|
+ cpumask_t domainspan;
|
|
|
|
+ cpumask_t covered;
|
|
|
|
+ cpumask_t notcovered;
|
|
|
|
+#endif
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+#if NR_CPUS > 128
|
|
|
|
+#define SCHED_CPUMASK_ALLOC 1
|
|
|
|
+#define SCHED_CPUMASK_FREE(v) kfree(v)
|
|
|
|
+#define SCHED_CPUMASK_DECLARE(v) struct allmasks *v
|
|
|
|
+#else
|
|
|
|
+#define SCHED_CPUMASK_ALLOC 0
|
|
|
|
+#define SCHED_CPUMASK_FREE(v)
|
|
|
|
+#define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v
|
|
|
|
+#endif
|
|
|
|
+
|
|
|
|
+#define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \
|
|
|
|
+ ((unsigned long)(a) + offsetof(struct allmasks, v))
|
|
|
|
+
|
|
|
|
+static int default_relax_domain_level = -1;
|
|
|
|
+
|
|
|
|
+static int __init setup_relax_domain_level(char *str)
|
|
|
|
+{
|
|
|
|
+ default_relax_domain_level = simple_strtoul(str, NULL, 0);
|
|
|
|
+ return 1;
|
|
|
|
+}
|
|
|
|
+__setup("relax_domain_level=", setup_relax_domain_level);
|
|
|
|
+
|
|
|
|
+static void set_domain_attribute(struct sched_domain *sd,
|
|
|
|
+ struct sched_domain_attr *attr)
|
|
|
|
+{
|
|
|
|
+ int request;
|
|
|
|
+
|
|
|
|
+ if (!attr || attr->relax_domain_level < 0) {
|
|
|
|
+ if (default_relax_domain_level < 0)
|
|
|
|
+ return;
|
|
|
|
+ else
|
|
|
|
+ request = default_relax_domain_level;
|
|
|
|
+ } else
|
|
|
|
+ request = attr->relax_domain_level;
|
|
|
|
+ if (request < sd->level) {
|
|
|
|
+ /* turn off idle balance on this domain */
|
|
|
|
+ sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
|
|
|
|
+ } else {
|
|
|
|
+ /* turn on idle balance on this domain */
|
|
|
|
+ sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
|
|
|
|
+ }
|
|
}
|
|
}
|
|
|
|
|
|
/*
|
|
/*
|
|
* Build sched domains for a given set of cpus and attach the sched domains
|
|
* Build sched domains for a given set of cpus and attach the sched domains
|
|
* to the individual cpus
|
|
* to the individual cpus
|
|
*/
|
|
*/
|
|
-static int build_sched_domains(const cpumask_t *cpu_map)
|
|
|
|
|
|
+static int __build_sched_domains(const cpumask_t *cpu_map,
|
|
|
|
+ struct sched_domain_attr *attr)
|
|
{
|
|
{
|
|
int i;
|
|
int i;
|
|
struct root_domain *rd;
|
|
struct root_domain *rd;
|
|
|
|
+ SCHED_CPUMASK_DECLARE(allmasks);
|
|
|
|
+ cpumask_t *tmpmask;
|
|
#ifdef CONFIG_NUMA
|
|
#ifdef CONFIG_NUMA
|
|
struct sched_group **sched_group_nodes = NULL;
|
|
struct sched_group **sched_group_nodes = NULL;
|
|
int sd_allnodes = 0;
|
|
int sd_allnodes = 0;
|
|
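SCHED_CPUMASK_VAR() is nothing more than pointer arithmetic over the shared allmasks block, which lives on the heap when NR_CPUS > 128 and on the stack otherwise. Expanded by hand for one member:

        SCHED_CPUMASK_DECLARE(allmasks);
        SCHED_CPUMASK_VAR(nodemask, allmasks);
        /* the second line is approximately:
         *   cpumask_t *nodemask = (cpumask_t *)((unsigned long)(allmasks) +
         *                                       offsetof(struct allmasks, nodemask));
         */

The anonymous union lets nodemask, this_sibling_map and this_core_map share one slot, presumably because no code path in the domain builder needs two of them at the same time.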
@@ -6603,39 +7397,65 @@ static int build_sched_domains(const cpumask_t *cpu_map)
|
|
printk(KERN_WARNING "Can not alloc sched group node list\n");
|
|
printk(KERN_WARNING "Can not alloc sched group node list\n");
|
|
return -ENOMEM;
|
|
return -ENOMEM;
|
|
}
|
|
}
|
|
- sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
|
|
|
|
#endif
|
|
#endif
|
|
|
|
|
|
rd = alloc_rootdomain();
|
|
rd = alloc_rootdomain();
|
|
if (!rd) {
|
|
if (!rd) {
|
|
printk(KERN_WARNING "Cannot alloc root domain\n");
|
|
printk(KERN_WARNING "Cannot alloc root domain\n");
|
|
|
|
+#ifdef CONFIG_NUMA
|
|
|
|
+ kfree(sched_group_nodes);
|
|
|
|
+#endif
|
|
|
|
+ return -ENOMEM;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+#if SCHED_CPUMASK_ALLOC
|
|
|
|
+ /* get space for all scratch cpumask variables */
|
|
|
|
+ allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
|
|
|
|
+ if (!allmasks) {
|
|
|
|
+ printk(KERN_WARNING "Cannot alloc cpumask array\n");
|
|
|
|
+ kfree(rd);
|
|
|
|
+#ifdef CONFIG_NUMA
|
|
|
|
+ kfree(sched_group_nodes);
|
|
|
|
+#endif
|
|
return -ENOMEM;
|
|
return -ENOMEM;
|
|
}
|
|
}
|
|
|
|
+#endif
|
|
|
|
+ tmpmask = (cpumask_t *)allmasks;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+#ifdef CONFIG_NUMA
|
|
|
|
+ sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
|
|
|
|
+#endif
|
|
|
|
|
|
/*
|
|
/*
|
|
* Set up domains for cpus specified by the cpu_map.
|
|
* Set up domains for cpus specified by the cpu_map.
|
|
*/
|
|
*/
|
|
for_each_cpu_mask(i, *cpu_map) {
|
|
for_each_cpu_mask(i, *cpu_map) {
|
|
struct sched_domain *sd = NULL, *p;
|
|
struct sched_domain *sd = NULL, *p;
|
|
- cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
|
|
|
|
|
|
+ SCHED_CPUMASK_VAR(nodemask, allmasks);
|
|
|
|
|
|
- cpus_and(nodemask, nodemask, *cpu_map);
|
|
|
|
|
|
+ *nodemask = node_to_cpumask(cpu_to_node(i));
|
|
|
|
+ cpus_and(*nodemask, *nodemask, *cpu_map);
|
|
|
|
|
|
#ifdef CONFIG_NUMA
|
|
#ifdef CONFIG_NUMA
|
|
if (cpus_weight(*cpu_map) >
|
|
if (cpus_weight(*cpu_map) >
|
|
- SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
|
|
|
|
|
|
+ SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) {
|
|
sd = &per_cpu(allnodes_domains, i);
|
|
sd = &per_cpu(allnodes_domains, i);
|
|
- *sd = SD_ALLNODES_INIT;
|
|
|
|
|
|
+ SD_INIT(sd, ALLNODES);
|
|
|
|
+ set_domain_attribute(sd, attr);
|
|
sd->span = *cpu_map;
|
|
sd->span = *cpu_map;
|
|
- cpu_to_allnodes_group(i, cpu_map, &sd->groups);
|
|
|
|
|
|
+ sd->first_cpu = first_cpu(sd->span);
|
|
|
|
+ cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
|
|
p = sd;
|
|
p = sd;
|
|
sd_allnodes = 1;
|
|
sd_allnodes = 1;
|
|
} else
|
|
} else
|
|
p = NULL;
|
|
p = NULL;
|
|
|
|
|
|
sd = &per_cpu(node_domains, i);
|
|
sd = &per_cpu(node_domains, i);
|
|
- *sd = SD_NODE_INIT;
|
|
|
|
- sd->span = sched_domain_node_span(cpu_to_node(i));
|
|
|
|
|
|
+ SD_INIT(sd, NODE);
|
|
|
|
+ set_domain_attribute(sd, attr);
|
|
|
|
+ sched_domain_node_span(cpu_to_node(i), &sd->span);
|
|
|
|
+ sd->first_cpu = first_cpu(sd->span);
|
|
sd->parent = p;
|
|
sd->parent = p;
|
|
if (p)
|
|
if (p)
|
|
p->child = sd;
|
|
p->child = sd;
|
|
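The cast tmpmask = (cpumask_t *)allmasks in the hunk above is only correct because tmpmask is the first member of struct allmasks (hence the "make this one first" comment earlier). Spelled out with the assumption made explicit, which the patch itself does not do:

        cpumask_t *tmpmask = (cpumask_t *)((unsigned long)allmasks +
                                           offsetof(struct allmasks, tmpmask));
        BUILD_BUG_ON(offsetof(struct allmasks, tmpmask) != 0);  /* hypothetical guard */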
@@ -6644,94 +7464,120 @@ static int build_sched_domains(const cpumask_t *cpu_map)
|
|
|
|
|
|
p = sd;
|
|
p = sd;
|
|
sd = &per_cpu(phys_domains, i);
|
|
sd = &per_cpu(phys_domains, i);
|
|
- *sd = SD_CPU_INIT;
|
|
|
|
- sd->span = nodemask;
|
|
|
|
|
|
+ SD_INIT(sd, CPU);
|
|
|
|
+ set_domain_attribute(sd, attr);
|
|
|
|
+ sd->span = *nodemask;
|
|
|
|
+ sd->first_cpu = first_cpu(sd->span);
|
|
sd->parent = p;
|
|
sd->parent = p;
|
|
if (p)
|
|
if (p)
|
|
p->child = sd;
|
|
p->child = sd;
|
|
- cpu_to_phys_group(i, cpu_map, &sd->groups);
|
|
|
|
|
|
+ cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
|
|
|
|
|
|
#ifdef CONFIG_SCHED_MC
|
|
#ifdef CONFIG_SCHED_MC
|
|
p = sd;
|
|
p = sd;
|
|
sd = &per_cpu(core_domains, i);
|
|
sd = &per_cpu(core_domains, i);
|
|
- *sd = SD_MC_INIT;
|
|
|
|
|
|
+ SD_INIT(sd, MC);
|
|
|
|
+ set_domain_attribute(sd, attr);
|
|
sd->span = cpu_coregroup_map(i);
|
|
sd->span = cpu_coregroup_map(i);
|
|
|
|
+ sd->first_cpu = first_cpu(sd->span);
|
|
cpus_and(sd->span, sd->span, *cpu_map);
|
|
cpus_and(sd->span, sd->span, *cpu_map);
|
|
sd->parent = p;
|
|
sd->parent = p;
|
|
p->child = sd;
|
|
p->child = sd;
|
|
- cpu_to_core_group(i, cpu_map, &sd->groups);
|
|
|
|
|
|
+ cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
|
|
#endif
|
|
#endif
|
|
|
|
|
|
#ifdef CONFIG_SCHED_SMT
|
|
#ifdef CONFIG_SCHED_SMT
|
|
p = sd;
|
|
p = sd;
|
|
sd = &per_cpu(cpu_domains, i);
|
|
sd = &per_cpu(cpu_domains, i);
|
|
- *sd = SD_SIBLING_INIT;
|
|
|
|
|
|
+ SD_INIT(sd, SIBLING);
|
|
|
|
+ set_domain_attribute(sd, attr);
|
|
sd->span = per_cpu(cpu_sibling_map, i);
|
|
sd->span = per_cpu(cpu_sibling_map, i);
|
|
|
|
+ sd->first_cpu = first_cpu(sd->span);
|
|
cpus_and(sd->span, sd->span, *cpu_map);
|
|
cpus_and(sd->span, sd->span, *cpu_map);
|
|
sd->parent = p;
|
|
sd->parent = p;
|
|
p->child = sd;
|
|
p->child = sd;
|
|
- cpu_to_cpu_group(i, cpu_map, &sd->groups);
|
|
|
|
|
|
+ cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
|
|
#endif
|
|
#endif
|
|
}
|
|
}
|
|
|
|
|
|
#ifdef CONFIG_SCHED_SMT
|
|
#ifdef CONFIG_SCHED_SMT
|
|
/* Set up CPU (sibling) groups */
|
|
/* Set up CPU (sibling) groups */
|
|
for_each_cpu_mask(i, *cpu_map) {
|
|
for_each_cpu_mask(i, *cpu_map) {
|
|
- cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
|
|
|
|
- cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
|
|
|
|
- if (i != first_cpu(this_sibling_map))
|
|
|
|
|
|
+ SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
|
|
|
|
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
|
|
|
|
+
|
|
|
|
+ *this_sibling_map = per_cpu(cpu_sibling_map, i);
|
|
|
|
+ cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map);
|
|
|
|
+ if (i != first_cpu(*this_sibling_map))
|
|
continue;
|
|
continue;
|
|
|
|
|
|
init_sched_build_groups(this_sibling_map, cpu_map,
|
|
init_sched_build_groups(this_sibling_map, cpu_map,
|
|
- &cpu_to_cpu_group);
|
|
|
|
|
|
+ &cpu_to_cpu_group,
|
|
|
|
+ send_covered, tmpmask);
|
|
}
|
|
}
|
|
#endif
|
|
#endif
|
|
|
|
|
|
#ifdef CONFIG_SCHED_MC
|
|
#ifdef CONFIG_SCHED_MC
|
|
/* Set up multi-core groups */
|
|
/* Set up multi-core groups */
|
|
for_each_cpu_mask(i, *cpu_map) {
|
|
for_each_cpu_mask(i, *cpu_map) {
|
|
- cpumask_t this_core_map = cpu_coregroup_map(i);
|
|
|
|
- cpus_and(this_core_map, this_core_map, *cpu_map);
|
|
|
|
- if (i != first_cpu(this_core_map))
|
|
|
|
|
|
+ SCHED_CPUMASK_VAR(this_core_map, allmasks);
|
|
|
|
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
|
|
|
|
+
|
|
|
|
+ *this_core_map = cpu_coregroup_map(i);
|
|
|
|
+ cpus_and(*this_core_map, *this_core_map, *cpu_map);
|
|
|
|
+ if (i != first_cpu(*this_core_map))
|
|
continue;
|
|
continue;
|
|
|
|
+
|
|
init_sched_build_groups(this_core_map, cpu_map,
|
|
init_sched_build_groups(this_core_map, cpu_map,
|
|
- &cpu_to_core_group);
|
|
|
|
|
|
+ &cpu_to_core_group,
|
|
|
|
+ send_covered, tmpmask);
|
|
}
|
|
}
|
|
#endif
|
|
#endif
|
|
|
|
|
|
/* Set up physical groups */
|
|
/* Set up physical groups */
|
|
for (i = 0; i < MAX_NUMNODES; i++) {
|
|
for (i = 0; i < MAX_NUMNODES; i++) {
|
|
- cpumask_t nodemask = node_to_cpumask(i);
|
|
|
|
|
|
+ SCHED_CPUMASK_VAR(nodemask, allmasks);
|
|
|
|
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
|
|
|
|
|
|
- cpus_and(nodemask, nodemask, *cpu_map);
|
|
|
|
- if (cpus_empty(nodemask))
|
|
|
|
|
|
+ *nodemask = node_to_cpumask(i);
|
|
|
|
+ cpus_and(*nodemask, *nodemask, *cpu_map);
|
|
|
|
+ if (cpus_empty(*nodemask))
|
|
continue;
|
|
continue;
|
|
|
|
|
|
- init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
|
|
|
|
|
|
+ init_sched_build_groups(nodemask, cpu_map,
|
|
|
|
+ &cpu_to_phys_group,
|
|
|
|
+ send_covered, tmpmask);
|
|
}
|
|
}
|
|
|
|
|
|
#ifdef CONFIG_NUMA
|
|
#ifdef CONFIG_NUMA
|
|
/* Set up node groups */
|
|
/* Set up node groups */
|
|
- if (sd_allnodes)
|
|
|
|
- init_sched_build_groups(*cpu_map, cpu_map,
|
|
|
|
- &cpu_to_allnodes_group);
|
|
|
|
|
|
+ if (sd_allnodes) {
|
|
|
|
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
|
|
|
|
+
|
|
|
|
+ init_sched_build_groups(cpu_map, cpu_map,
|
|
|
|
+ &cpu_to_allnodes_group,
|
|
|
|
+ send_covered, tmpmask);
|
|
|
|
+ }
|
|
|
|
|
|
for (i = 0; i < MAX_NUMNODES; i++) {
|
|
for (i = 0; i < MAX_NUMNODES; i++) {
|
|
/* Set up node groups */
|
|
/* Set up node groups */
|
|
struct sched_group *sg, *prev;
|
|
struct sched_group *sg, *prev;
|
|
- cpumask_t nodemask = node_to_cpumask(i);
|
|
|
|
- cpumask_t domainspan;
|
|
|
|
- cpumask_t covered = CPU_MASK_NONE;
|
|
|
|
|
|
+ SCHED_CPUMASK_VAR(nodemask, allmasks);
|
|
|
|
+ SCHED_CPUMASK_VAR(domainspan, allmasks);
|
|
|
|
+ SCHED_CPUMASK_VAR(covered, allmasks);
|
|
int j;
|
|
int j;
|
|
|
|
|
|
- cpus_and(nodemask, nodemask, *cpu_map);
|
|
|
|
- if (cpus_empty(nodemask)) {
|
|
|
|
|
|
+ *nodemask = node_to_cpumask(i);
|
|
|
|
+ cpus_clear(*covered);
|
|
|
|
+
|
|
|
|
+ cpus_and(*nodemask, *nodemask, *cpu_map);
|
|
|
|
+ if (cpus_empty(*nodemask)) {
|
|
sched_group_nodes[i] = NULL;
|
|
sched_group_nodes[i] = NULL;
|
|
continue;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
|
|
- domainspan = sched_domain_node_span(i);
|
|
|
|
- cpus_and(domainspan, domainspan, *cpu_map);
|
|
|
|
|
|
+ sched_domain_node_span(i, domainspan);
|
|
|
|
+ cpus_and(*domainspan, *domainspan, *cpu_map);
|
|
|
|
|
|
sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
|
|
sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
|
|
if (!sg) {
|
|
if (!sg) {
|
|
@@ -6740,31 +7586,31 @@ static int build_sched_domains(const cpumask_t *cpu_map)
|
|
goto error;
|
|
goto error;
|
|
}
|
|
}
|
|
sched_group_nodes[i] = sg;
|
|
sched_group_nodes[i] = sg;
|
|
- for_each_cpu_mask(j, nodemask) {
|
|
|
|
|
|
+ for_each_cpu_mask(j, *nodemask) {
|
|
struct sched_domain *sd;
|
|
struct sched_domain *sd;
|
|
|
|
|
|
sd = &per_cpu(node_domains, j);
|
|
sd = &per_cpu(node_domains, j);
|
|
sd->groups = sg;
|
|
sd->groups = sg;
|
|
}
|
|
}
|
|
sg->__cpu_power = 0;
|
|
sg->__cpu_power = 0;
|
|
- sg->cpumask = nodemask;
|
|
|
|
|
|
+ sg->cpumask = *nodemask;
|
|
sg->next = sg;
|
|
sg->next = sg;
|
|
- cpus_or(covered, covered, nodemask);
|
|
|
|
|
|
+ cpus_or(*covered, *covered, *nodemask);
|
|
prev = sg;
|
|
prev = sg;
|
|
|
|
|
|
for (j = 0; j < MAX_NUMNODES; j++) {
|
|
for (j = 0; j < MAX_NUMNODES; j++) {
|
|
- cpumask_t tmp, notcovered;
|
|
|
|
|
|
+ SCHED_CPUMASK_VAR(notcovered, allmasks);
|
|
int n = (i + j) % MAX_NUMNODES;
|
|
int n = (i + j) % MAX_NUMNODES;
|
|
|
|
+ node_to_cpumask_ptr(pnodemask, n);
|
|
|
|
|
|
- cpus_complement(notcovered, covered);
|
|
|
|
- cpus_and(tmp, notcovered, *cpu_map);
|
|
|
|
- cpus_and(tmp, tmp, domainspan);
|
|
|
|
- if (cpus_empty(tmp))
|
|
|
|
|
|
+ cpus_complement(*notcovered, *covered);
|
|
|
|
+ cpus_and(*tmpmask, *notcovered, *cpu_map);
|
|
|
|
+ cpus_and(*tmpmask, *tmpmask, *domainspan);
|
|
|
|
+ if (cpus_empty(*tmpmask))
|
|
break;
|
|
break;
|
|
|
|
|
|
- nodemask = node_to_cpumask(n);
|
|
|
|
- cpus_and(tmp, tmp, nodemask);
|
|
|
|
- if (cpus_empty(tmp))
|
|
|
|
|
|
+ cpus_and(*tmpmask, *tmpmask, *pnodemask);
|
|
|
|
+ if (cpus_empty(*tmpmask))
|
|
continue;
|
|
continue;
|
|
|
|
|
|
sg = kmalloc_node(sizeof(struct sched_group),
|
|
sg = kmalloc_node(sizeof(struct sched_group),
|
|
@@ -6775,9 +7621,9 @@ static int build_sched_domains(const cpumask_t *cpu_map)
|
|
goto error;
|
|
goto error;
|
|
}
|
|
}
|
|
sg->__cpu_power = 0;
|
|
sg->__cpu_power = 0;
|
|
- sg->cpumask = tmp;
|
|
|
|
|
|
+ sg->cpumask = *tmpmask;
|
|
sg->next = prev->next;
|
|
sg->next = prev->next;
|
|
- cpus_or(covered, covered, tmp);
|
|
|
|
|
|
+ cpus_or(*covered, *covered, *tmpmask);
|
|
prev->next = sg;
|
|
prev->next = sg;
|
|
prev = sg;
|
|
prev = sg;
|
|
}
|
|
}
|
|
@@ -6813,7 +7659,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
|
|
if (sd_allnodes) {
|
|
if (sd_allnodes) {
|
|
struct sched_group *sg;
|
|
struct sched_group *sg;
|
|
|
|
|
|
- cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
|
|
|
|
|
|
+ cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg,
|
|
|
|
+ tmpmask);
|
|
init_numa_sched_groups_power(sg);
|
|
init_numa_sched_groups_power(sg);
|
|
}
|
|
}
|
|
#endif
|
|
#endif
|
|
@@ -6831,17 +7678,26 @@ static int build_sched_domains(const cpumask_t *cpu_map)
|
|
cpu_attach_domain(sd, rd, i);
|
|
cpu_attach_domain(sd, rd, i);
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+ SCHED_CPUMASK_FREE((void *)allmasks);
|
|
return 0;
|
|
return 0;
|
|
|
|
|
|
#ifdef CONFIG_NUMA
|
|
#ifdef CONFIG_NUMA
|
|
error:
|
|
error:
|
|
- free_sched_groups(cpu_map);
|
|
|
|
|
|
+ free_sched_groups(cpu_map, tmpmask);
|
|
|
|
+ SCHED_CPUMASK_FREE((void *)allmasks);
|
|
return -ENOMEM;
|
|
return -ENOMEM;
|
|
#endif
|
|
#endif
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+static int build_sched_domains(const cpumask_t *cpu_map)
|
|
|
|
+{
|
|
|
|
+ return __build_sched_domains(cpu_map, NULL);
|
|
|
|
+}
|
|
|
|
+
|
|
static cpumask_t *doms_cur; /* current sched domains */
|
|
static cpumask_t *doms_cur; /* current sched domains */
|
|
static int ndoms_cur; /* number of sched domains in 'doms_cur' */
|
|
static int ndoms_cur; /* number of sched domains in 'doms_cur' */
|
|
|
|
+static struct sched_domain_attr *dattr_cur; /* attributes of custom domains
|
|
|
|
+ in 'doms_cur' */
|
|
|
|
|
|
/*
|
|
/*
|
|
* Special case: If a kmalloc of a doms_cur partition (array of
|
|
* Special case: If a kmalloc of a doms_cur partition (array of
|
|
@@ -6869,15 +7725,17 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
|
|
if (!doms_cur)
|
|
if (!doms_cur)
|
|
doms_cur = &fallback_doms;
|
|
doms_cur = &fallback_doms;
|
|
cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
|
|
cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
|
|
|
|
+ dattr_cur = NULL;
|
|
err = build_sched_domains(doms_cur);
|
|
err = build_sched_domains(doms_cur);
|
|
register_sched_domain_sysctl();
|
|
register_sched_domain_sysctl();
|
|
|
|
|
|
return err;
|
|
return err;
|
|
}
|
|
}
|
|
|
|
|
|
-static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
|
|
|
|
|
|
+static void arch_destroy_sched_domains(const cpumask_t *cpu_map,
|
|
|
|
+ cpumask_t *tmpmask)
|
|
{
|
|
{
|
|
- free_sched_groups(cpu_map);
|
|
|
|
|
|
+ free_sched_groups(cpu_map, tmpmask);
|
|
}
|
|
}
|
|
|
|
|
|
/*
|
|
/*
|
|
@@ -6886,6 +7744,7 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
|
|
*/
|
|
*/
|
|
static void detach_destroy_domains(const cpumask_t *cpu_map)
|
|
static void detach_destroy_domains(const cpumask_t *cpu_map)
|
|
{
|
|
{
|
|
|
|
+ cpumask_t tmpmask;
|
|
int i;
|
|
int i;
|
|
|
|
|
|
unregister_sched_domain_sysctl();
|
|
unregister_sched_domain_sysctl();
|
|
@@ -6893,7 +7752,23 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
|
|
for_each_cpu_mask(i, *cpu_map)
|
|
for_each_cpu_mask(i, *cpu_map)
|
|
cpu_attach_domain(NULL, &def_root_domain, i);
|
|
cpu_attach_domain(NULL, &def_root_domain, i);
|
|
synchronize_sched();
|
|
synchronize_sched();
|
|
- arch_destroy_sched_domains(cpu_map);
|
|
|
|
|
|
+ arch_destroy_sched_domains(cpu_map, &tmpmask);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/* handle null as "default" */
|
|
|
|
+static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
|
|
|
|
+ struct sched_domain_attr *new, int idx_new)
|
|
|
|
+{
|
|
|
|
+ struct sched_domain_attr tmp;
|
|
|
|
+
|
|
|
|
+ /* fast path */
|
|
|
|
+ if (!new && !cur)
|
|
|
|
+ return 1;
|
|
|
|
+
|
|
|
|
+ tmp = SD_ATTR_INIT;
|
|
|
|
+ return !memcmp(cur ? (cur + idx_cur) : &tmp,
|
|
|
|
+ new ? (new + idx_new) : &tmp,
|
|
|
|
+ sizeof(struct sched_domain_attr));
|
|
}
|
|
}
|
|
|
|
|
|
/*
|
|
/*
|
|
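dattrs_equal() treats a NULL attribute array as "every entry is the default": the missing side is compared against a stack copy of SD_ATTR_INIT instead of being dereferenced. Purely illustrative, assuming dattr_cur is non-NULL and i is in range:

        struct sched_domain_attr dflt = SD_ATTR_INIT;

        dattrs_equal(NULL, 0, NULL, 0);         /* == 1, both sides absent */
        dattrs_equal(dattr_cur, i, NULL, 0);    /* == !memcmp(&dattr_cur[i], &dflt,
                                                 *            sizeof(dflt)) */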
@@ -6917,7 +7792,8 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
|
|
*
|
|
*
|
|
* Call with hotplug lock held
|
|
* Call with hotplug lock held
|
|
*/
|
|
*/
|
|
-void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
|
|
|
|
|
|
+void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
|
|
|
|
+ struct sched_domain_attr *dattr_new)
|
|
{
|
|
{
|
|
int i, j;
|
|
int i, j;
|
|
|
|
|
|
@@ -6930,12 +7806,14 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
|
|
ndoms_new = 1;
|
|
ndoms_new = 1;
|
|
doms_new = &fallback_doms;
|
|
doms_new = &fallback_doms;
|
|
cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
|
|
cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
|
|
|
|
+ dattr_new = NULL;
|
|
}
|
|
}
|
|
|
|
|
|
/* Destroy deleted domains */
|
|
/* Destroy deleted domains */
|
|
for (i = 0; i < ndoms_cur; i++) {
|
|
for (i = 0; i < ndoms_cur; i++) {
|
|
for (j = 0; j < ndoms_new; j++) {
|
|
for (j = 0; j < ndoms_new; j++) {
|
|
- if (cpus_equal(doms_cur[i], doms_new[j]))
|
|
|
|
|
|
+ if (cpus_equal(doms_cur[i], doms_new[j])
|
|
|
|
+ && dattrs_equal(dattr_cur, i, dattr_new, j))
|
|
goto match1;
|
|
goto match1;
|
|
}
|
|
}
|
|
/* no match - a current sched domain not in new doms_new[] */
|
|
/* no match - a current sched domain not in new doms_new[] */
|
|
@@ -6947,11 +7825,13 @@ match1:
|
|
/* Build new domains */
|
|
/* Build new domains */
|
|
for (i = 0; i < ndoms_new; i++) {
|
|
for (i = 0; i < ndoms_new; i++) {
|
|
for (j = 0; j < ndoms_cur; j++) {
|
|
for (j = 0; j < ndoms_cur; j++) {
|
|
- if (cpus_equal(doms_new[i], doms_cur[j]))
|
|
|
|
|
|
+ if (cpus_equal(doms_new[i], doms_cur[j])
|
|
|
|
+ && dattrs_equal(dattr_new, i, dattr_cur, j))
|
|
goto match2;
|
|
goto match2;
|
|
}
|
|
}
|
|
/* no match - add a new doms_new */
|
|
/* no match - add a new doms_new */
|
|
- build_sched_domains(doms_new + i);
|
|
|
|
|
|
+ __build_sched_domains(doms_new + i,
|
|
|
|
+ dattr_new ? dattr_new + i : NULL);
|
|
match2:
|
|
match2:
|
|
;
|
|
;
|
|
}
|
|
}
|
|
@@ -6959,7 +7839,9 @@ match2:
|
|
/* Remember the new sched domains */
|
|
/* Remember the new sched domains */
|
|
if (doms_cur != &fallback_doms)
|
|
if (doms_cur != &fallback_doms)
|
|
kfree(doms_cur);
|
|
kfree(doms_cur);
|
|
|
|
+ kfree(dattr_cur); /* kfree(NULL) is safe */
|
|
doms_cur = doms_new;
|
|
doms_cur = doms_new;
|
|
|
|
+ dattr_cur = dattr_new;
|
|
ndoms_cur = ndoms_new;
|
|
ndoms_cur = ndoms_new;
|
|
|
|
|
|
register_sched_domain_sysctl();
|
|
register_sched_domain_sysctl();
|
|
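With the extra parameter, a caller of partition_sched_domains() (in mainline that is the cpuset code, which is not part of this file) is expected to hand over parallel arrays: doms_new[k] names the CPUs of partition k and dattr_new[k] carries its attributes, or NULL for all-defaults. A hedged sketch of such a call, allocation failures elided:

        cpumask_t *doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
        struct sched_domain_attr *dattr =
                kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

        /* ... fill doms[k] and dattr[k].relax_domain_level for each partition ... */

        partition_sched_domains(ndoms, doms, dattr);

Ownership transfers with the call: the function stores the arrays in doms_cur/dattr_cur and kfrees the previous ones, so the caller must not free them afterwards.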
@@ -7086,6 +7968,11 @@ void __init sched_init_smp(void)
|
|
{
|
|
{
|
|
cpumask_t non_isolated_cpus;
|
|
cpumask_t non_isolated_cpus;
|
|
|
|
|
|
|
|
+#if defined(CONFIG_NUMA)
|
|
|
|
+ sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
|
|
|
|
+ GFP_KERNEL);
|
|
|
|
+ BUG_ON(sched_group_nodes_bycpu == NULL);
|
|
|
|
+#endif
|
|
get_online_cpus();
|
|
get_online_cpus();
|
|
arch_init_sched_domains(&cpu_online_map);
|
|
arch_init_sched_domains(&cpu_online_map);
|
|
cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
|
|
cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
|
|
@@ -7096,13 +7983,18 @@ void __init sched_init_smp(void)
|
|
hotcpu_notifier(update_sched_domains, 0);
|
|
hotcpu_notifier(update_sched_domains, 0);
|
|
|
|
|
|
/* Move init over to a non-isolated CPU */
|
|
/* Move init over to a non-isolated CPU */
|
|
- if (set_cpus_allowed(current, non_isolated_cpus) < 0)
|
|
|
|
|
|
+ if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)
|
|
BUG();
|
|
BUG();
|
|
sched_init_granularity();
|
|
sched_init_granularity();
|
|
}
|
|
}
|
|
#else
|
|
#else
|
|
void __init sched_init_smp(void)
|
|
void __init sched_init_smp(void)
|
|
{
|
|
{
|
|
|
|
+#if defined(CONFIG_NUMA)
|
|
|
|
+ sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
|
|
|
|
+ GFP_KERNEL);
|
|
|
|
+ BUG_ON(sched_group_nodes_bycpu == NULL);
|
|
|
|
+#endif
|
|
sched_init_granularity();
|
|
sched_init_granularity();
|
|
}
|
|
}
|
|
#endif /* CONFIG_SMP */
|
|
#endif /* CONFIG_SMP */
|
|
@@ -7117,6 +8009,7 @@ int in_sched_functions(unsigned long addr)
|
|
static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
|
|
static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
|
|
{
|
|
{
|
|
cfs_rq->tasks_timeline = RB_ROOT;
|
|
cfs_rq->tasks_timeline = RB_ROOT;
|
|
|
|
+ INIT_LIST_HEAD(&cfs_rq->tasks);
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
cfs_rq->rq = rq;
|
|
cfs_rq->rq = rq;
|
|
#endif
|
|
#endif
|
|
@@ -7146,6 +8039,8 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
|
|
|
|
|
|
rt_rq->rt_time = 0;
|
|
rt_rq->rt_time = 0;
|
|
rt_rq->rt_throttled = 0;
|
|
rt_rq->rt_throttled = 0;
|
|
|
|
+ rt_rq->rt_runtime = 0;
|
|
|
|
+ spin_lock_init(&rt_rq->rt_runtime_lock);
|
|
|
|
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
rt_rq->rt_nr_boosted = 0;
|
|
rt_rq->rt_nr_boosted = 0;
|
|
@@ -7154,10 +8049,11 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
|
|
}
|
|
}
|
|
|
|
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
-static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
|
|
|
|
- struct cfs_rq *cfs_rq, struct sched_entity *se,
|
|
|
|
- int cpu, int add)
|
|
|
|
|
|
+static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
|
|
|
|
+ struct sched_entity *se, int cpu, int add,
|
|
|
|
+ struct sched_entity *parent)
|
|
{
|
|
{
|
|
|
|
+ struct rq *rq = cpu_rq(cpu);
|
|
tg->cfs_rq[cpu] = cfs_rq;
|
|
tg->cfs_rq[cpu] = cfs_rq;
|
|
init_cfs_rq(cfs_rq, rq);
|
|
init_cfs_rq(cfs_rq, rq);
|
|
cfs_rq->tg = tg;
|
|
cfs_rq->tg = tg;
|
|
@@ -7165,45 +8061,132 @@ static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
|
|
list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
|
|
list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
|
|
|
|
|
|
tg->se[cpu] = se;
|
|
tg->se[cpu] = se;
|
|
- se->cfs_rq = &rq->cfs;
|
|
|
|
|
|
+ /* se could be NULL for init_task_group */
|
|
|
|
+ if (!se)
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ if (!parent)
|
|
|
|
+ se->cfs_rq = &rq->cfs;
|
|
|
|
+ else
|
|
|
|
+ se->cfs_rq = parent->my_q;
|
|
|
|
+
|
|
se->my_q = cfs_rq;
|
|
se->my_q = cfs_rq;
|
|
se->load.weight = tg->shares;
|
|
se->load.weight = tg->shares;
|
|
se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
|
|
se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
|
|
- se->parent = NULL;
|
|
|
|
|
|
+ se->parent = parent;
|
|
}
|
|
}
|
|
#endif
|
|
#endif
|
|
|
|
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
-static void init_tg_rt_entry(struct rq *rq, struct task_group *tg,
|
|
|
|
- struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
|
|
|
|
- int cpu, int add)
|
|
|
|
|
|
+static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
|
|
|
|
+ struct sched_rt_entity *rt_se, int cpu, int add,
|
|
|
|
+ struct sched_rt_entity *parent)
|
|
{
|
|
{
|
|
|
|
+ struct rq *rq = cpu_rq(cpu);
|
|
|
|
+
|
|
tg->rt_rq[cpu] = rt_rq;
|
|
tg->rt_rq[cpu] = rt_rq;
|
|
init_rt_rq(rt_rq, rq);
|
|
init_rt_rq(rt_rq, rq);
|
|
rt_rq->tg = tg;
|
|
rt_rq->tg = tg;
|
|
rt_rq->rt_se = rt_se;
|
|
rt_rq->rt_se = rt_se;
|
|
|
|
+ rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
|
|
if (add)
|
|
if (add)
|
|
list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
|
|
list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
|
|
|
|
|
|
tg->rt_se[cpu] = rt_se;
|
|
tg->rt_se[cpu] = rt_se;
|
|
|
|
+ if (!rt_se)
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ if (!parent)
|
|
|
|
+ rt_se->rt_rq = &rq->rt;
|
|
|
|
+ else
|
|
|
|
+ rt_se->rt_rq = parent->my_q;
|
|
|
|
+
|
|
rt_se->rt_rq = &rq->rt;
|
|
rt_se->rt_rq = &rq->rt;
|
|
rt_se->my_q = rt_rq;
|
|
rt_se->my_q = rt_rq;
|
|
- rt_se->parent = NULL;
|
|
|
|
|
|
+ rt_se->parent = parent;
|
|
INIT_LIST_HEAD(&rt_se->run_list);
|
|
INIT_LIST_HEAD(&rt_se->run_list);
|
|
}
|
|
}
|
|
#endif
|
|
#endif
|
|
|
|
|
|
void __init sched_init(void)
|
|
void __init sched_init(void)
|
|
{
|
|
{
|
|
- int highest_cpu = 0;
|
|
|
|
int i, j;
|
|
int i, j;
|
|
|
|
+ unsigned long alloc_size = 0, ptr;
|
|
|
|
+
|
|
|
|
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
|
|
+ alloc_size += 2 * nr_cpu_ids * sizeof(void **);
|
|
|
|
+#endif
|
|
|
|
+#ifdef CONFIG_RT_GROUP_SCHED
|
|
|
|
+ alloc_size += 2 * nr_cpu_ids * sizeof(void **);
|
|
|
|
+#endif
|
|
|
|
+#ifdef CONFIG_USER_SCHED
|
|
|
|
+ alloc_size *= 2;
|
|
|
|
+#endif
|
|
|
|
+ /*
|
|
|
|
+ * As sched_init() is called before page_alloc is setup,
|
|
|
|
+ * we use alloc_bootmem().
|
|
|
|
+ */
|
|
|
|
+ if (alloc_size) {
|
|
|
|
+ ptr = (unsigned long)alloc_bootmem_low(alloc_size);
|
|
|
|
+
|
|
|
|
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
|
|
+ init_task_group.se = (struct sched_entity **)ptr;
|
|
|
|
+ ptr += nr_cpu_ids * sizeof(void **);
|
|
|
|
+
|
|
|
|
+ init_task_group.cfs_rq = (struct cfs_rq **)ptr;
|
|
|
|
+ ptr += nr_cpu_ids * sizeof(void **);
|
|
|
|
+
|
|
|
|
+#ifdef CONFIG_USER_SCHED
|
|
|
|
+ root_task_group.se = (struct sched_entity **)ptr;
|
|
|
|
+ ptr += nr_cpu_ids * sizeof(void **);
|
|
|
|
+
|
|
|
|
+ root_task_group.cfs_rq = (struct cfs_rq **)ptr;
|
|
|
|
+ ptr += nr_cpu_ids * sizeof(void **);
|
|
|
|
+#endif
|
|
|
|
+#endif
|
|
|
|
+#ifdef CONFIG_RT_GROUP_SCHED
|
|
|
|
+ init_task_group.rt_se = (struct sched_rt_entity **)ptr;
|
|
|
|
+ ptr += nr_cpu_ids * sizeof(void **);
|
|
|
|
+
|
|
|
|
+ init_task_group.rt_rq = (struct rt_rq **)ptr;
|
|
|
|
+ ptr += nr_cpu_ids * sizeof(void **);
|
|
|
|
+
|
|
|
|
+#ifdef CONFIG_USER_SCHED
|
|
|
|
+ root_task_group.rt_se = (struct sched_rt_entity **)ptr;
|
|
|
|
+ ptr += nr_cpu_ids * sizeof(void **);
|
|
|
|
+
|
|
|
|
+ root_task_group.rt_rq = (struct rt_rq **)ptr;
|
|
|
|
+ ptr += nr_cpu_ids * sizeof(void **);
|
|
|
|
+#endif
|
|
|
|
+#endif
|
|
|
|
+ }
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
#ifdef CONFIG_SMP
|
|
|
|
+ init_aggregate();
|
|
init_defrootdomain();
|
|
init_defrootdomain();
|
|
#endif
|
|
#endif
|
|
|
|
|
|
|
|
+ init_rt_bandwidth(&def_rt_bandwidth,
|
|
|
|
+ global_rt_period(), global_rt_runtime());
|
|
|
|
+
|
|
|
|
+#ifdef CONFIG_RT_GROUP_SCHED
|
|
|
|
+ init_rt_bandwidth(&init_task_group.rt_bandwidth,
|
|
|
|
+ global_rt_period(), global_rt_runtime());
|
|
|
|
+#ifdef CONFIG_USER_SCHED
|
|
|
|
+ init_rt_bandwidth(&root_task_group.rt_bandwidth,
|
|
|
|
+ global_rt_period(), RUNTIME_INF);
|
|
|
|
+#endif
|
|
|
|
+#endif
|
|
|
|
+
|
|
#ifdef CONFIG_GROUP_SCHED
|
|
#ifdef CONFIG_GROUP_SCHED
|
|
list_add(&init_task_group.list, &task_groups);
|
|
list_add(&init_task_group.list, &task_groups);
|
|
|
|
+ INIT_LIST_HEAD(&init_task_group.children);
|
|
|
|
+
|
|
|
|
+#ifdef CONFIG_USER_SCHED
|
|
|
|
+ INIT_LIST_HEAD(&root_task_group.children);
|
|
|
|
+ init_task_group.parent = &root_task_group;
|
|
|
|
+ list_add(&init_task_group.siblings, &root_task_group.children);
|
|
|
|
+#endif
|
|
#endif
|
|
#endif
|
|
|
|
|
|
for_each_possible_cpu(i) {
|
|
for_each_possible_cpu(i) {
|
|
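The single alloc_bootmem_low() above carves one block into per-group pointer arrays of nr_cpu_ids entries each; bootmem is used because sched_init() runs before the page allocator is up, as the comment notes. Worked through for the largest configuration this hunk supports:

        /* CONFIG_FAIR_GROUP_SCHED + CONFIG_RT_GROUP_SCHED + CONFIG_USER_SCHED:
         *   alloc_size  = (2 + 2) * nr_cpu_ids * sizeof(void **);  fair + rt arrays
         *   alloc_size *= 2;                     init_task_group and root_task_group
         * i.e. 8 arrays of nr_cpu_ids pointers, consumed in this order:
         *   init.se, init.cfs_rq, root.se, root.cfs_rq,
         *   init.rt_se, init.rt_rq, root.rt_se, root.rt_rq
         */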
@@ -7214,26 +8197,68 @@ void __init sched_init(void)
|
|
lockdep_set_class(&rq->lock, &rq->rq_lock_key);
|
|
lockdep_set_class(&rq->lock, &rq->rq_lock_key);
|
|
rq->nr_running = 0;
|
|
rq->nr_running = 0;
|
|
rq->clock = 1;
|
|
rq->clock = 1;
|
|
|
|
+ update_last_tick_seen(rq);
|
|
init_cfs_rq(&rq->cfs, rq);
|
|
init_cfs_rq(&rq->cfs, rq);
|
|
init_rt_rq(&rq->rt, rq);
|
|
init_rt_rq(&rq->rt, rq);
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
init_task_group.shares = init_task_group_load;
|
|
init_task_group.shares = init_task_group_load;
|
|
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
|
|
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
|
|
- init_tg_cfs_entry(rq, &init_task_group,
|
|
|
|
|
|
+#ifdef CONFIG_CGROUP_SCHED
|
|
|
|
+ /*
|
|
|
|
+ * How much cpu bandwidth does init_task_group get?
|
|
|
|
+ *
|
|
|
|
+ * In case of task-groups formed thr' the cgroup filesystem, it
|
|
|
|
+ * gets 100% of the cpu resources in the system. This overall
|
|
|
|
+ * system cpu resource is divided among the tasks of
|
|
|
|
+ * init_task_group and its child task-groups in a fair manner,
|
|
|
|
+ * based on each entity's (task or task-group's) weight
|
|
|
|
+ * (se->load.weight).
|
|
|
|
+ *
|
|
|
|
+ * In other words, if init_task_group has 10 tasks of weight
|
|
|
|
+ * 1024, and two child groups A0 and A1 (of weight 1024 each),
|
|
|
|
+ * then A0's share of the cpu resource is:
|
|
|
|
+ *
|
|
|
|
+ * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
|
|
|
|
+ *
|
|
|
|
+ * We achieve this by letting init_task_group's tasks sit
|
|
|
|
+ * directly in rq->cfs (i.e init_task_group->se[] = NULL).
|
|
|
|
+ */
|
|
|
|
+ init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
|
|
|
|
+#elif defined CONFIG_USER_SCHED
|
|
|
|
+ root_task_group.shares = NICE_0_LOAD;
|
|
|
|
+ init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
|
|
|
|
+ /*
|
|
|
|
+ * In case of task-groups formed thr' the user id of tasks,
|
|
|
|
+ * init_task_group represents tasks belonging to root user.
|
|
|
|
+ * Hence it forms a sibling of all subsequent groups formed.
|
|
|
|
+ * In this case, init_task_group gets only a fraction of overall
|
|
|
|
+ * system cpu resource, based on the weight assigned to root
|
|
|
|
+ * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
|
|
|
|
+ * by letting tasks of init_task_group sit in a separate cfs_rq
|
|
|
|
+ * (init_cfs_rq) and having one entity represent this group of
|
|
|
|
+ * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
|
|
|
|
+ */
|
|
|
|
+ init_tg_cfs_entry(&init_task_group,
|
|
&per_cpu(init_cfs_rq, i),
|
|
&per_cpu(init_cfs_rq, i),
|
|
- &per_cpu(init_sched_entity, i), i, 1);
|
|
|
|
|
|
+ &per_cpu(init_sched_entity, i), i, 1,
|
|
|
|
+ root_task_group.se[i]);
|
|
|
|
|
|
#endif
|
|
#endif
|
|
|
|
+#endif /* CONFIG_FAIR_GROUP_SCHED */
|
|
|
|
+
|
|
|
|
+ rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
- init_task_group.rt_runtime =
|
|
|
|
- sysctl_sched_rt_runtime * NSEC_PER_USEC;
|
|
|
|
INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
|
|
INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
|
|
- init_tg_rt_entry(rq, &init_task_group,
|
|
|
|
|
|
+#ifdef CONFIG_CGROUP_SCHED
|
|
|
|
+ init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
|
|
|
|
+#elif defined CONFIG_USER_SCHED
|
|
|
|
+ init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
|
|
|
|
+ init_tg_rt_entry(&init_task_group,
|
|
&per_cpu(init_rt_rq, i),
|
|
&per_cpu(init_rt_rq, i),
|
|
- &per_cpu(init_sched_rt_entity, i), i, 1);
|
|
|
|
|
|
+ &per_cpu(init_sched_rt_entity, i), i, 1,
|
|
|
|
+ root_task_group.rt_se[i]);
|
|
|
|
+#endif
|
|
#endif
|
|
#endif
|
|
- rq->rt_period_expire = 0;
|
|
|
|
- rq->rt_throttled = 0;
|
|
|
|
|
|
|
|
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
|
|
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
|
|
rq->cpu_load[j] = 0;
|
|
rq->cpu_load[j] = 0;
|
|
@@ -7250,7 +8275,6 @@ void __init sched_init(void)
|
|
#endif
|
|
#endif
|
|
init_rq_hrtick(rq);
|
|
init_rq_hrtick(rq);
|
|
atomic_set(&rq->nr_iowait, 0);
|
|
atomic_set(&rq->nr_iowait, 0);
|
|
- highest_cpu = i;
|
|
|
|
}
|
|
}
|
|
|
|
|
|
set_load_weight(&init_task);
|
|
set_load_weight(&init_task);
|
|
@@ -7260,7 +8284,6 @@ void __init sched_init(void)
|
|
#endif
|
|
#endif
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
#ifdef CONFIG_SMP
|
|
- nr_cpu_ids = highest_cpu + 1;
|
|
|
|
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
|
|
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
|
|
#endif
|
|
#endif
|
|
|
|
|
|
@@ -7419,8 +8442,6 @@ void set_curr_task(int cpu, struct task_struct *p)
|
|
|
|
|
|
#endif
|
|
#endif
|
|
|
|
|
|
-#ifdef CONFIG_GROUP_SCHED
|
|
|
|
-
|
|
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
static void free_fair_sched_group(struct task_group *tg)
|
|
static void free_fair_sched_group(struct task_group *tg)
|
|
{
|
|
{
|
|
@@ -7437,17 +8458,18 @@ static void free_fair_sched_group(struct task_group *tg)
|
|
kfree(tg->se);
|
|
kfree(tg->se);
|
|
}
|
|
}
|
|
|
|
|
|
-static int alloc_fair_sched_group(struct task_group *tg)
|
|
|
|
|
|
+static
|
|
|
|
+int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
|
|
{
|
|
{
|
|
struct cfs_rq *cfs_rq;
|
|
struct cfs_rq *cfs_rq;
|
|
- struct sched_entity *se;
|
|
|
|
|
|
+ struct sched_entity *se, *parent_se;
|
|
struct rq *rq;
|
|
struct rq *rq;
|
|
int i;
|
|
int i;
|
|
|
|
|
|
- tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL);
|
|
|
|
|
|
+ tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
|
|
if (!tg->cfs_rq)
|
|
if (!tg->cfs_rq)
|
|
goto err;
|
|
goto err;
|
|
- tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL);
|
|
|
|
|
|
+ tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
|
|
if (!tg->se)
|
|
if (!tg->se)
|
|
goto err;
|
|
goto err;
|
|
|
|
|
|
@@ -7466,7 +8488,8 @@ static int alloc_fair_sched_group(struct task_group *tg)
|
|
if (!se)
|
|
if (!se)
|
|
goto err;
|
|
goto err;
|
|
|
|
|
|
- init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0);
|
|
|
|
|
|
+ parent_se = parent ? parent->se[i] : NULL;
|
|
|
|
+ init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
|
|
}
|
|
}
|
|
|
|
|
|
return 1;
|
|
return 1;
|
|
@@ -7490,7 +8513,8 @@ static inline void free_fair_sched_group(struct task_group *tg)
|
|
{
|
|
{
|
|
}
|
|
}
|
|
|
|
|
|
-static inline int alloc_fair_sched_group(struct task_group *tg)
|
|
|
|
|
|
+static inline
|
|
|
|
+int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
|
|
{
|
|
{
|
|
return 1;
|
|
return 1;
|
|
}
|
|
}
|
|
@@ -7509,6 +8533,8 @@ static void free_rt_sched_group(struct task_group *tg)
|
|
{
|
|
{
|
|
int i;
|
|
int i;
|
|
|
|
|
|
|
|
+ destroy_rt_bandwidth(&tg->rt_bandwidth);
|
|
|
|
+
|
|
for_each_possible_cpu(i) {
|
|
for_each_possible_cpu(i) {
|
|
if (tg->rt_rq)
|
|
if (tg->rt_rq)
|
|
kfree(tg->rt_rq[i]);
|
|
kfree(tg->rt_rq[i]);
|
|
@@ -7520,21 +8546,23 @@ static void free_rt_sched_group(struct task_group *tg)
|
|
kfree(tg->rt_se);
|
|
kfree(tg->rt_se);
|
|
}
|
|
}
|
|
|
|
|
|
-static int alloc_rt_sched_group(struct task_group *tg)
|
|
|
|
|
|
+static
|
|
|
|
+int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
|
|
{
|
|
{
|
|
struct rt_rq *rt_rq;
|
|
struct rt_rq *rt_rq;
|
|
- struct sched_rt_entity *rt_se;
|
|
|
|
|
|
+ struct sched_rt_entity *rt_se, *parent_se;
|
|
struct rq *rq;
|
|
struct rq *rq;
|
|
int i;
|
|
int i;
|
|
|
|
|
|
- tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL);
|
|
|
|
|
|
+ tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
|
|
if (!tg->rt_rq)
|
|
if (!tg->rt_rq)
|
|
goto err;
|
|
goto err;
|
|
- tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL);
|
|
|
|
|
|
+ tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
|
|
if (!tg->rt_se)
|
|
if (!tg->rt_se)
|
|
goto err;
|
|
goto err;
|
|
|
|
|
|
- tg->rt_runtime = 0;
|
|
|
|
|
|
+ init_rt_bandwidth(&tg->rt_bandwidth,
|
|
|
|
+ ktime_to_ns(def_rt_bandwidth.rt_period), 0);
|
|
|
|
|
|
for_each_possible_cpu(i) {
|
|
for_each_possible_cpu(i) {
|
|
rq = cpu_rq(i);
|
|
rq = cpu_rq(i);
|
|
@@ -7549,7 +8577,8 @@ static int alloc_rt_sched_group(struct task_group *tg)
|
|
if (!rt_se)
|
|
if (!rt_se)
|
|
goto err;
|
|
goto err;
|
|
|
|
|
|
- init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0);
|
|
|
|
|
|
+ parent_se = parent ? parent->rt_se[i] : NULL;
|
|
|
|
+ init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
|
|
}
|
|
}
|
|
|
|
|
|
return 1;
|
|
return 1;
|
|
@@ -7573,7 +8602,8 @@ static inline void free_rt_sched_group(struct task_group *tg)
|
|
{
|
|
{
|
|
}
|
|
}
|
|
|
|
|
|
-static inline int alloc_rt_sched_group(struct task_group *tg)
|
|
|
|
|
|
+static inline
|
|
|
|
+int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
|
|
{
|
|
{
|
|
return 1;
|
|
return 1;
|
|
}
|
|
}
|
|
@@ -7587,6 +8617,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
|
|
}
|
|
}
|
|
#endif
|
|
#endif
|
|
|
|
|
|
|
|
+#ifdef CONFIG_GROUP_SCHED
|
|
static void free_sched_group(struct task_group *tg)
|
|
static void free_sched_group(struct task_group *tg)
|
|
{
|
|
{
|
|
free_fair_sched_group(tg);
|
|
free_fair_sched_group(tg);
|
|
@@ -7595,7 +8626,7 @@ static void free_sched_group(struct task_group *tg)
|
|
}
|
|
}
|
|
|
|
|
|
/* allocate runqueue etc for a new task group */
|
|
/* allocate runqueue etc for a new task group */
|
|
-struct task_group *sched_create_group(void)
|
|
|
|
|
|
+struct task_group *sched_create_group(struct task_group *parent)
|
|
{
|
|
{
|
|
struct task_group *tg;
|
|
struct task_group *tg;
|
|
unsigned long flags;
|
|
unsigned long flags;
|
|
@@ -7605,10 +8636,10 @@ struct task_group *sched_create_group(void)
|
|
if (!tg)
|
|
if (!tg)
|
|
return ERR_PTR(-ENOMEM);
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
- if (!alloc_fair_sched_group(tg))
|
|
|
|
|
|
+ if (!alloc_fair_sched_group(tg, parent))
|
|
goto err;
|
|
goto err;
|
|
|
|
|
|
- if (!alloc_rt_sched_group(tg))
|
|
|
|
|
|
+ if (!alloc_rt_sched_group(tg, parent))
|
|
goto err;
|
|
goto err;
|
|
|
|
|
|
spin_lock_irqsave(&task_group_lock, flags);
|
|
spin_lock_irqsave(&task_group_lock, flags);
|
|
@@ -7617,6 +8648,12 @@ struct task_group *sched_create_group(void)
|
|
register_rt_sched_group(tg, i);
|
|
register_rt_sched_group(tg, i);
|
|
}
|
|
}
|
|
list_add_rcu(&tg->list, &task_groups);
|
|
list_add_rcu(&tg->list, &task_groups);
|
|
|
|
+
|
|
|
|
+ WARN_ON(!parent); /* root should already exist */
|
|
|
|
+
|
|
|
|
+ tg->parent = parent;
|
|
|
|
+ list_add_rcu(&tg->siblings, &parent->children);
|
|
|
|
+ INIT_LIST_HEAD(&tg->children);
|
|
spin_unlock_irqrestore(&task_group_lock, flags);
|
|
spin_unlock_irqrestore(&task_group_lock, flags);
|
|
|
|
|
|
return tg;
|
|
return tg;
|
|
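sched_create_group() now records the parent and links the new group onto parent->children, so task groups form a tree rather than a flat list. A hypothetical caller creating a nested group:

        struct task_group *child;

        child = sched_create_group(parent);     /* parent: any existing group */
        if (IS_ERR(child))
                return PTR_ERR(child);
        /* child->parent == parent; child sits on parent->children (RCU list) */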
@@ -7645,6 +8682,7 @@ void sched_destroy_group(struct task_group *tg)
|
|
unregister_rt_sched_group(tg, i);
|
|
unregister_rt_sched_group(tg, i);
|
|
}
|
|
}
|
|
list_del_rcu(&tg->list);
|
|
list_del_rcu(&tg->list);
|
|
|
|
+ list_del_rcu(&tg->siblings);
|
|
spin_unlock_irqrestore(&task_group_lock, flags);
|
|
spin_unlock_irqrestore(&task_group_lock, flags);
|
|
|
|
|
|
/* wait for possible concurrent references to cfs_rqs complete */
|
|
/* wait for possible concurrent references to cfs_rqs complete */
|
|
@@ -7688,16 +8726,14 @@ void sched_move_task(struct task_struct *tsk)
|
|
|
|
|
|
task_rq_unlock(rq, &flags);
|
|
task_rq_unlock(rq, &flags);
|
|
}
|
|
}
|
|
|
|
+#endif
|
|
|
|
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
-static void set_se_shares(struct sched_entity *se, unsigned long shares)
|
|
|
|
|
|
+static void __set_se_shares(struct sched_entity *se, unsigned long shares)
|
|
{
|
|
{
|
|
struct cfs_rq *cfs_rq = se->cfs_rq;
|
|
struct cfs_rq *cfs_rq = se->cfs_rq;
|
|
- struct rq *rq = cfs_rq->rq;
|
|
|
|
int on_rq;
|
|
int on_rq;
|
|
|
|
|
|
- spin_lock_irq(&rq->lock);
|
|
|
|
-
|
|
|
|
on_rq = se->on_rq;
|
|
on_rq = se->on_rq;
|
|
if (on_rq)
|
|
if (on_rq)
|
|
dequeue_entity(cfs_rq, se, 0);
|
|
dequeue_entity(cfs_rq, se, 0);
|
|
@@ -7707,8 +8743,17 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
|
|
|
|
|
|
if (on_rq)
|
|
if (on_rq)
|
|
enqueue_entity(cfs_rq, se, 0);
|
|
enqueue_entity(cfs_rq, se, 0);
|
|
|
|
+}
|
|
|
|
|
|
- spin_unlock_irq(&rq->lock);
|
|
|
|
|
|
+static void set_se_shares(struct sched_entity *se, unsigned long shares)
|
|
|
|
+{
|
|
|
|
+ struct cfs_rq *cfs_rq = se->cfs_rq;
|
|
|
|
+ struct rq *rq = cfs_rq->rq;
|
|
|
|
+ unsigned long flags;
|
|
|
|
+
|
|
|
|
+ spin_lock_irqsave(&rq->lock, flags);
|
|
|
|
+ __set_se_shares(se, shares);
|
|
|
|
+ spin_unlock_irqrestore(&rq->lock, flags);
|
|
}
|
|
}
|
|
|
|
|
|
static DEFINE_MUTEX(shares_mutex);
|
|
static DEFINE_MUTEX(shares_mutex);
|
|
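
The hunk above splits set_se_shares() into a lock-free __set_se_shares() plus a thin wrapper that takes rq->lock with spin_lock_irqsave(), so a caller that already holds the runqueue lock can reuse the body directly. A minimal user-space sketch of that wrapper pattern, using a pthread mutex in place of rq->lock; the names and the "shares" payload are purely illustrative, not kernel code:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long se_shares;

    /* Body of the operation; caller must already hold rq_lock. */
    static void __set_se_shares(unsigned long shares)
    {
    	se_shares = shares;
    }

    /* Locked wrapper for callers that do not hold rq_lock yet. */
    static void set_se_shares(unsigned long shares)
    {
    	pthread_mutex_lock(&rq_lock);
    	__set_se_shares(shares);
    	pthread_mutex_unlock(&rq_lock);
    }

    int main(void)
    {
    	set_se_shares(1024);
    	printf("shares = %lu\n", se_shares);
    	return 0;
    }
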
@@ -7718,13 +8763,19 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	int i;
 	unsigned long flags;
 
+	/*
+	 * We can't change the weight of the root cgroup.
+	 */
+	if (!tg->se[0])
+		return -EINVAL;
+
 	/*
 	 * A weight of 0 or 1 can cause arithmetics problems.
 	 * (The default weight is 1024 - so there's no practical
 	 *  limitation from this.)
 	 */
-	if (shares < 2)
-		shares = 2;
+	if (shares < MIN_SHARES)
+		shares = MIN_SHARES;
 
 	mutex_lock(&shares_mutex);
 	if (tg->shares == shares)
@@ -7733,6 +8784,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	spin_lock_irqsave(&task_group_lock, flags);
 	for_each_possible_cpu(i)
 		unregister_fair_sched_group(tg, i);
+	list_del_rcu(&tg->siblings);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	/* wait for any ongoing reference to this group to finish */
@@ -7743,8 +8795,13 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	 * w/o tripping rebalance_share or load_balance_fair.
 	 */
 	tg->shares = shares;
-	for_each_possible_cpu(i)
-		set_se_shares(tg->se[i], shares);
+	for_each_possible_cpu(i) {
+		/*
+		 * force a rebalance
+		 */
+		cfs_rq_set_shares(tg->cfs_rq[i], 0);
+		set_se_shares(tg->se[i], shares/nr_cpu_ids);
+	}
 
 	/*
 	 * Enable load balance activity on this group, by inserting it back on
@@ -7753,6 +8810,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	spin_lock_irqsave(&task_group_lock, flags);
 	for_each_possible_cpu(i)
 		register_fair_sched_group(tg, i);
+	list_add_rcu(&tg->siblings, &tg->parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 done:
 	mutex_unlock(&shares_mutex);
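
With the group's weight now rebalanced across CPUs, sched_group_set_shares() above clamps the request to MIN_SHARES and seeds each per-cpu entity with shares/nr_cpu_ids rather than the full value. A stand-alone sketch of that arithmetic; MIN_SHARES is assumed to be 2 (matching the clamp this hunk replaces) and the CPU count is an example, so this is an illustration rather than the patch itself:

    #include <stdio.h>

    #define MIN_SHARES	2	/* assumed value, mirrors the old "shares < 2" clamp */

    int main(void)
    {
    	unsigned long shares = 1024;	/* requested group weight */
    	unsigned long nr_cpu_ids = 4;	/* example CPU count */

    	if (shares < MIN_SHARES)
    		shares = MIN_SHARES;

    	/* each per-cpu sched_entity starts with an equal slice */
    	printf("per-cpu seed = %lu\n", shares / nr_cpu_ids);
    	return 0;
    }
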
@@ -7779,26 +8837,58 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 	return div64_64(runtime << 16, period);
 }
 
+#ifdef CONFIG_CGROUP_SCHED
+static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
+{
+	struct task_group *tgi, *parent = tg->parent;
+	unsigned long total = 0;
+
+	if (!parent) {
+		if (global_rt_period() < period)
+			return 0;
+
+		return to_ratio(period, runtime) <
+			to_ratio(global_rt_period(), global_rt_runtime());
+	}
+
+	if (ktime_to_ns(parent->rt_bandwidth.rt_period) < period)
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tgi, &parent->children, siblings) {
+		if (tgi == tg)
+			continue;
+
+		total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
+				tgi->rt_bandwidth.rt_runtime);
+	}
+	rcu_read_unlock();
+
+	return total + to_ratio(period, runtime) <
+		to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period),
+				parent->rt_bandwidth.rt_runtime);
+}
+#elif defined CONFIG_USER_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
 	struct task_group *tgi;
 	unsigned long total = 0;
 	unsigned long global_ratio =
-		to_ratio(sysctl_sched_rt_period,
-			 sysctl_sched_rt_runtime < 0 ?
-				RUNTIME_INF : sysctl_sched_rt_runtime);
+		to_ratio(global_rt_period(), global_rt_runtime());
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(tgi, &task_groups, list) {
 		if (tgi == tg)
 			continue;
 
-		total += to_ratio(period, tgi->rt_runtime);
+		total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
+				tgi->rt_bandwidth.rt_runtime);
 	}
 	rcu_read_unlock();
 
 	return total + to_ratio(period, runtime) < global_ratio;
 }
+#endif
 
 /* Must be called with tasklist_lock held */
 static inline int tg_has_rt_tasks(struct task_group *tg)
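
The new hierarchical __rt_schedulable() above works on 16.16 fixed-point utilization ratios: to_ratio(period, runtime) is (runtime << 16) / period, and a group is only accepted if its ratio plus those of its siblings stays below the parent's (after first rejecting a child period longer than the parent's). A small user-space illustration of that admission check; the helper mirrors to_ratio(), but the periods and runtimes are made-up example values, not taken from the patch:

    #include <stdio.h>
    #include <stdint.h>

    /* same 16.16 fixed-point ratio as to_ratio() in sched.c */
    static unsigned long to_ratio(uint64_t period, uint64_t runtime)
    {
    	return (unsigned long)((runtime << 16) / period);
    }

    int main(void)
    {
    	/* parent: 1s period, 950ms runtime (example values) */
    	uint64_t parent_period = 1000000000ULL, parent_runtime = 950000000ULL;
    	/* existing sibling: 400ms of every 1s */
    	unsigned long total = to_ratio(1000000000ULL, 400000000ULL);
    	/* candidate child: 300ms of every 1s */
    	unsigned long child = to_ratio(1000000000ULL, 300000000ULL);

    	int ok = total + child < to_ratio(parent_period, parent_runtime);
    	printf("child admitted: %s\n", ok ? "yes" : "no");	/* yes: 0.4 + 0.3 < 0.95 */
    	return 0;
    }
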
@@ -7811,19 +8901,14 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 	return 0;
 }
 
-int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
+static int tg_set_bandwidth(struct task_group *tg,
+		u64 rt_period, u64 rt_runtime)
 {
-	u64 rt_runtime, rt_period;
-	int err = 0;
-
-	rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
-	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
-	if (rt_runtime_us == -1)
-		rt_runtime = RUNTIME_INF;
+	int i, err = 0;
 
 	mutex_lock(&rt_constraints_mutex);
 	read_lock(&tasklist_lock);
-	if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+	if (rt_runtime == 0 && tg_has_rt_tasks(tg)) {
 		err = -EBUSY;
 		goto unlock;
 	}
@@ -7831,7 +8916,19 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 		err = -EINVAL;
 		goto unlock;
 	}
-	tg->rt_runtime = rt_runtime;
+
+	spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
+	tg->rt_bandwidth.rt_runtime = rt_runtime;
+
+	for_each_possible_cpu(i) {
+		struct rt_rq *rt_rq = tg->rt_rq[i];
+
+		spin_lock(&rt_rq->rt_runtime_lock);
+		rt_rq->rt_runtime = rt_runtime;
+		spin_unlock(&rt_rq->rt_runtime_lock);
+	}
+	spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
 unlock:
 	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
@@ -7839,19 +8936,109 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 	return err;
 }
 
+int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
+{
+	u64 rt_runtime, rt_period;
+
+	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
+	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
+	if (rt_runtime_us < 0)
+		rt_runtime = RUNTIME_INF;
+
+	return tg_set_bandwidth(tg, rt_period, rt_runtime);
+}
+
 long sched_group_rt_runtime(struct task_group *tg)
 {
 	u64 rt_runtime_us;
 
-	if (tg->rt_runtime == RUNTIME_INF)
+	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
 		return -1;
 
-	rt_runtime_us = tg->rt_runtime;
+	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
 	do_div(rt_runtime_us, NSEC_PER_USEC);
 	return rt_runtime_us;
 }
+
+int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
+{
+	u64 rt_runtime, rt_period;
+
+	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
+	rt_runtime = tg->rt_bandwidth.rt_runtime;
+
+	return tg_set_bandwidth(tg, rt_period, rt_runtime);
+}
+
+long sched_group_rt_period(struct task_group *tg)
+{
+	u64 rt_period_us;
+
+	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
+	do_div(rt_period_us, NSEC_PER_USEC);
+	return rt_period_us;
+}
+
+static int sched_rt_global_constraints(void)
+{
+	int ret = 0;
+
+	mutex_lock(&rt_constraints_mutex);
+	if (!__rt_schedulable(NULL, 1, 0))
+		ret = -EINVAL;
+	mutex_unlock(&rt_constraints_mutex);
+
+	return ret;
+}
+#else
+static int sched_rt_global_constraints(void)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+	for_each_possible_cpu(i) {
+		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
+
+		spin_lock(&rt_rq->rt_runtime_lock);
+		rt_rq->rt_runtime = global_rt_runtime();
+		spin_unlock(&rt_rq->rt_runtime_lock);
+	}
+	spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+
+	return 0;
+}
 #endif
-#endif /* CONFIG_GROUP_SCHED */
+
+int sched_rt_handler(struct ctl_table *table, int write,
+		struct file *filp, void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	int old_period, old_runtime;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+	old_period = sysctl_sched_rt_period;
+	old_runtime = sysctl_sched_rt_runtime;
+
+	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+
+	if (!ret && write) {
+		ret = sched_rt_global_constraints();
+		if (ret) {
+			sysctl_sched_rt_period = old_period;
+			sysctl_sched_rt_runtime = old_runtime;
+		} else {
+			def_rt_bandwidth.rt_runtime = global_rt_runtime();
+			def_rt_bandwidth.rt_period =
+				ns_to_ktime(global_rt_period());
+		}
+	}
+	mutex_unlock(&mutex);
+
+	return ret;
+}
 
 #ifdef CONFIG_CGROUP_SCHED
 
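
The interface functions added above keep the bandwidth values internal in nanoseconds (with a ktime_t period) while the cgroup and sysctl side speaks microseconds, and a negative runtime maps to RUNTIME_INF, i.e. unlimited. A quick user-space sketch of those conversions, mirroring what sched_group_set_rt_runtime() and sched_group_rt_runtime() do; the numbers in main() are examples only:

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_USEC	1000ULL
    #define RUNTIME_INF	((uint64_t)~0ULL)	/* same sentinel as in sched.c */

    /* microseconds from user space -> internal nanoseconds */
    static uint64_t runtime_us_to_ns(long rt_runtime_us)
    {
    	if (rt_runtime_us < 0)
    		return RUNTIME_INF;	/* any negative value means unlimited */
    	return (uint64_t)rt_runtime_us * NSEC_PER_USEC;
    }

    /* internal nanoseconds -> microseconds reported back to user space */
    static long runtime_ns_to_us(uint64_t rt_runtime)
    {
    	if (rt_runtime == RUNTIME_INF)
    		return -1;
    	return (long)(rt_runtime / NSEC_PER_USEC);
    }

    int main(void)
    {
    	printf("950000us -> %llu ns\n",
    	       (unsigned long long)runtime_us_to_ns(950000));
    	printf("-1       -> %s\n",
    	       runtime_us_to_ns(-1) == RUNTIME_INF ? "RUNTIME_INF" : "?");
    	printf("back     -> %ld us\n", runtime_ns_to_us(950000000ULL));
    	return 0;
    }
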
@@ -7865,7 +9052,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
 static struct cgroup_subsys_state *
 cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
-	struct task_group *tg;
+	struct task_group *tg, *parent;
 
 	if (!cgrp->parent) {
 		/* This is early initialization for the top cgroup */
@@ -7873,11 +9060,8 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 		return &init_task_group.css;
 	}
 
-	/* we support only 1-level deep hierarchical scheduler atm */
-	if (cgrp->parent->parent)
-		return ERR_PTR(-EINVAL);
-
-	tg = sched_create_group();
+	parent = cgroup_tg(cgrp->parent);
+	tg = sched_create_group(parent);
 	if (IS_ERR(tg))
 		return ERR_PTR(-ENOMEM);
 
@@ -7901,7 +9085,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 {
 #ifdef CONFIG_RT_GROUP_SCHED
 	/* Don't accept realtime tasks when there is no way for them to run */
-	if (rt_task(tsk) && cgroup_tg(cgrp)->rt_runtime == 0)
+	if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
 		return -EINVAL;
 #else
 	/* We don't support RT-tasks being in separate groups */
@@ -7935,7 +9119,7 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
+static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
 				struct file *file,
 				const char __user *userbuf,
 				size_t nbytes, loff_t *unused_ppos)
@@ -7979,6 +9163,17 @@ static ssize_t cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft,
 
 	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
+
+static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
+		u64 rt_period_us)
+{
+	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
+}
+
+static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
+{
+	return sched_group_rt_period(cgroup_tg(cgrp));
+}
 #endif
 
 static struct cftype cpu_files[] = {
@@ -7995,6 +9190,11 @@ static struct cftype cpu_files[] = {
 		.read = cpu_rt_runtime_read,
 		.write = cpu_rt_runtime_write,
 	},
+	{
+		.name = "rt_period_us",
+		.read_uint = cpu_rt_period_read_uint,
+		.write_uint = cpu_rt_period_write_uint,
+	},
 #endif
 };
 
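
With the rt_period_us entry added to cpu_files[] above, each cpu cgroup exposes its RT period alongside the existing rt_runtime_us knob. A hedged user-space example of driving the two files with plain POSIX stdio; the mount point /dev/cgroup and the group name "rtgroup" are assumptions for illustration, not something the patch defines:

    #include <stdio.h>

    /* Paths are illustrative: they assume the cpu cgroup hierarchy is
     * mounted at /dev/cgroup and a group "rtgroup" already exists. */
    static int write_str(const char *path, const char *val)
    {
    	FILE *f = fopen(path, "w");

    	if (!f)
    		return -1;
    	fprintf(f, "%s\n", val);
    	return fclose(f);
    }

    int main(void)
    {
    	/* a 1s period, of which at most 0.5s may be consumed by RT tasks */
    	write_str("/dev/cgroup/rtgroup/cpu.rt_period_us", "1000000");
    	write_str("/dev/cgroup/rtgroup/cpu.rt_runtime_us", "500000");
    	return 0;
    }
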
@@ -8035,9 +9235,9 @@ struct cpuacct {
 struct cgroup_subsys cpuacct_subsys;
 
 /* return cpu accounting group corresponding to this container */
-static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
+static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
 {
-	return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
+	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
 			    struct cpuacct, css);
 }
 
@@ -8050,7 +9250,7 @@ static inline struct cpuacct *task_ca(struct task_struct *tsk)
 
 /* create a new cpu accounting group */
 static struct cgroup_subsys_state *cpuacct_create(
-	struct cgroup_subsys *ss, struct cgroup *cont)
+	struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
 	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
 
@@ -8068,18 +9268,18 @@ static struct cgroup_subsys_state *cpuacct_create(
 
 /* destroy an existing cpu accounting group */
 static void
-cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
-	struct cpuacct *ca = cgroup_ca(cont);
+	struct cpuacct *ca = cgroup_ca(cgrp);
 
 	free_percpu(ca->cpuusage);
 	kfree(ca);
 }
 
 /* return total cpu usage (in nanoseconds) of a group */
-static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
+static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
 {
-	struct cpuacct *ca = cgroup_ca(cont);
+	struct cpuacct *ca = cgroup_ca(cgrp);
 	u64 totalcpuusage = 0;
 	int i;
 
@@ -8098,16 +9298,40 @@ static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
 	return totalcpuusage;
 }
 
+static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
+								u64 reset)
+{
+	struct cpuacct *ca = cgroup_ca(cgrp);
+	int err = 0;
+	int i;
+
+	if (reset) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	for_each_possible_cpu(i) {
+		u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
+
+		spin_lock_irq(&cpu_rq(i)->lock);
+		*cpuusage = 0;
+		spin_unlock_irq(&cpu_rq(i)->lock);
+	}
+out:
+	return err;
+}
+
 static struct cftype files[] = {
 	{
 		.name = "usage",
 		.read_uint = cpuusage_read,
+		.write_uint = cpuusage_write,
 	},
 };
 
-static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
-	return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+	return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
 }
 
 /*