@@ -823,8 +823,8 @@ static void set_load_weight(struct task_struct *p)
 		return;
 	}
 
-	load->weight = scale_load(prio_to_weight[prio]);
-	load->inv_weight = prio_to_wmult[prio];
+	load->weight = scale_load(sched_prio_to_weight[prio]);
+	load->inv_weight = sched_prio_to_wmult[prio];
 }
 
 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -8625,3 +8625,44 @@ void dump_cpu_task(int cpu)
 	pr_info("Task dump for CPU %d:\n", cpu);
 	sched_show_task(cpu_curr(cpu));
 }
+
+/*
+ * Nice levels are multiplicative, with a gentle 10% change for every
+ * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
+ * nice 1, it will get ~10% less CPU time than another CPU-bound task
+ * that remained on nice 0.
+ *
+ * The "10% effect" is relative and cumulative: from _any_ nice level,
+ * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
+ * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
+ * If a task goes up by ~10% and another task goes down by ~10% then
+ * the relative distance between them is ~25%.)
+ */
+const int sched_prio_to_weight[40] = {
+ /* -20 */     88761,     71755,     56483,     46273,     36291,
+ /* -15 */     29154,     23254,     18705,     14949,     11916,
+ /* -10 */      9548,      7620,      6100,      4904,      3906,
+ /*  -5 */      3121,      2501,      1991,      1586,      1277,
+ /*   0 */      1024,       820,       655,       526,       423,
+ /*   5 */       335,       272,       215,       172,       137,
+ /*  10 */       110,        87,        70,        56,        45,
+ /*  15 */        36,        29,        23,        18,        15,
+};
+
+/*
+ * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
+ *
+ * In cases where the weight does not change often, we can use the
+ * precalculated inverse to speed up arithmetics by turning divisions
+ * into multiplications:
+ */
+const u32 sched_prio_to_wmult[40] = {
+ /* -20 */     48388,     59856,     76040,     92818,    118348,
+ /* -15 */    147320,    184698,    229616,    287308,    360437,
+ /* -10 */    449829,    563644,    704093,    875809,   1099582,
+ /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
+ /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
+ /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
+ /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
+ /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
+};
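The two comment blocks in the added code make concrete quantitative claims: adjacent entries of sched_prio_to_weight[] differ by a factor of ~1.25 (the source of the ~10%-per-nice-level effect), and each sched_prio_to_wmult[] entry is 2^32 divided by the matching weight, so a division by the weight can be replaced by a multiply and a 32-bit shift. A minimal userspace sketch that checks both properties — not part of the patch; the rows for nice -1, 0 and +1 are copied from the arrays above:

/* Standalone check of the two properties documented in the patch's
 * comments (plain userspace C, not kernel code). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Table rows for nice -1, 0 and +1, taken from
	 * sched_prio_to_weight[] and sched_prio_to_wmult[]. */
	const uint64_t weight[3] = { 1277, 1024, 820 };
	const uint64_t wmult[3]  = { 3363326, 4194304, 5237765 };

	/* Property 1: each nice step scales the weight by ~1.25, which
	 * is what yields the ~10% relative CPU-time change per level. */
	printf("ratio(-1/0) = %.3f, ratio(0/+1) = %.3f\n",
	       (double)weight[0] / weight[1],
	       (double)weight[1] / weight[2]);

	/* Property 2: wmult = 2^32 / weight, so x / weight can be
	 * computed as (x * wmult) >> 32 -- a multiply plus a shift
	 * instead of a division. */
	for (int i = 0; i < 3; i++) {
		uint64_t x = 1000000;
		printf("x/weight = %llu, (x*wmult)>>32 = %llu\n",
		       (unsigned long long)(x / weight[i]),
		       (unsigned long long)((x * wmult[i]) >> 32));
	}
	return 0;
}

Both ratios print as roughly 1.25 (1.247 and 1.249), and the multiply-and-shift quotient matches the true division for each row (783, 976, 1219), which is exactly why the kernel precalculates the inverse table for the hot scheduling paths.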