@@ -2474,44 +2474,6 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
-/*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-	u64 ns = 0;
-
-	/*
-	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
-	 * project cycles that may never be accounted to this
-	 * thread, breaking clock_gettime().
-	 */
-	if (task_current(rq, p) && task_on_rq_queued(p)) {
-		update_rq_clock(rq);
-		ns = rq_clock_task(rq) - p->se.exec_start;
-		if ((s64)ns < 0)
-			ns = 0;
-	}
-
-	return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns = 0;
-
-	rq = task_rq_lock(p, &flags);
-	ns = do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
 	unsigned long flags;
 	struct rq *rq;
-	u64 ns = 0;
+	u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
 	/*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #endif
 
 	rq = task_rq_lock(p, &flags);
-	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+	/*
+	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
+	 * project cycles that may never be accounted to this
+	 * thread, breaking clock_gettime().
+	 */
+	if (task_current(rq, p) && task_on_rq_queued(p)) {
+		update_rq_clock(rq);
+		p->sched_class->update_curr(rq);
+	}
+	ns = p->se.sum_exec_runtime;
 	task_rq_unlock(rq, p, &flags);
 
 	return ns;
@@ -6368,6 +6339,10 @@ static void sched_init_numa(void)
 		if (!sched_debug())
 			break;
 	}
+
+	if (!level)
+		return;
+
 	/*
 	 * 'level' contains the number of unique distances, excluding the
 	 * identity distance node_distance(i,i).
@@ -7444,8 +7419,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		put_prev_task(rq, tsk);
 
-	tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-				lockdep_is_held(&tsk->sighand->siglock)),
+	/*
+	 * All callers are synchronized by task_rq_lock(); we do not use RCU
+	 * which is pointless here. Thus, we pass "true" to task_css_check()
+	 * to prevent lockdep warnings.
+	 */
+	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
 	tsk->sched_task_group = tg;