|
@@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
	 * In theory, the compile should just see 0 here, and optimize out the call
	 * to sched_rt_avg_update. But I don't trust it...
	 */
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-	s64 steal = 0, irq_delta = 0;
-#endif
+	s64 __maybe_unused steal = 0, irq_delta = 0;
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
|
@@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 	rq->clock_task += delta;

-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 		update_irq_load_avg(rq, irq_delta + steal);
 #endif