浏览代码

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixlets from Thomas Gleixner:
 "Three trivial fixlets for the scheduler:

   - move print_rt_rq() and print_dl_rq() declarations to the right
     place

   - make grub_reclaim() static

   - fix the bogus documentation reference in Kconfig"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix documentation file path
  sched/deadline: Make the grub_reclaim() function static
  sched/debug: Move the print_rt_rq() and print_dl_rq() declarations to kernel/sched/sched.h
Linus Torvalds 7 年之前
父节点
当前提交
441cab960d
共有 4 个文件被更改，包括 5 次插入和 8 次删除
  1. 1 1
      init/Kconfig
  2. 1 3
      kernel/sched/deadline.c
  3. 0 2
      kernel/sched/rt.c
  4. 3 2
      kernel/sched/sched.h

+ 1 - 1
init/Kconfig

@@ -738,7 +738,7 @@ config CFS_BANDWIDTH
 	  tasks running within the fair group scheduler.  Groups with no limit
 	  set are considered to be unconstrained and will run with no
 	  restriction.
-	  See tip/Documentation/scheduler/sched-bwc.txt for more information.
+	  See Documentation/scheduler/sched-bwc.txt for more information.
 
 config RT_GROUP_SCHED
 	bool "Group scheduling for SCHED_RR/FIFO"

+ 1 - 3
kernel/sched/deadline.c

@@ -1117,7 +1117,7 @@ extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
  * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
  * So, overflow is not an issue here.
  */
-u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
+static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
 {
 	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
 	u64 u_act;
@@ -2731,8 +2731,6 @@ bool dl_cpu_busy(unsigned int cpu)
 #endif
 
 #ifdef CONFIG_SCHED_DEBUG
-extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
-
 void print_dl_stats(struct seq_file *m, int cpu)
 {
 	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);

+ 0 - 2
kernel/sched/rt.c

@@ -2701,8 +2701,6 @@ int sched_rr_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_SCHED_DEBUG
-extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
-
 void print_rt_stats(struct seq_file *m, int cpu)
 {
 	rt_rq_iter_t iter;

+ 3 - 2
kernel/sched/sched.h

@@ -2025,8 +2025,9 @@ extern bool sched_debug_enabled;
 extern void print_cfs_stats(struct seq_file *m, int cpu);
 extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void print_dl_stats(struct seq_file *m, int cpu);
-extern void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
+extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
+extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
+extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
 #ifdef CONFIG_NUMA_BALANCING
 extern void
 show_numa_stats(struct task_struct *p, struct seq_file *m);