@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
 	struct cpuidle_state *target_state = &drv->states[index];
 	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
-	u64 time_start, time_end;
+	ktime_t time_start, time_end;
 	s64 diff;
 
 	/*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	sched_idle_set_state(target_state);
 
 	trace_cpu_idle_rcuidle(index, dev->cpu);
-	time_start = local_clock();
+	time_start = ns_to_ktime(local_clock());
 
 	stop_critical_timings();
 	entered_state = target_state->enter(dev, drv, index);
 	start_critical_timings();
 
-	time_end = local_clock();
+	time_end = ns_to_ktime(local_clock());
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
 	/* The cpu is no longer idle or about to enter idle. */
@@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	if (!cpuidle_state_is_coupled(drv, index))
 		local_irq_enable();
 
-	/*
-	 * local_clock() returns the time in nanosecond, let's shift
-	 * by 10 (divide by 1024) to have microsecond based time.
-	 */
-	diff = (time_end - time_start) >> 10;
+	diff = ktime_us_delta(time_end, time_start);
 	if (diff > INT_MAX)
 		diff = INT_MAX;
 
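
The comment removed in the last hunk explains the old conversion: local_clock() returns nanoseconds, and shifting right by 10 divides by 1024 rather than 1000, so the recorded idle residency came out roughly 2.3% lower than the true microsecond value. ktime_us_delta() performs the exact microsecond conversion on the ktime_t delta. The following standalone sketch (not part of the patch; the sample delta value is arbitrary and the program is plain userspace C, not kernel code) illustrates the difference between the two conversions:

/*
 * Illustrative comparison of the old ">> 10" approximation with an exact
 * division by 1000, which is what ktime_us_delta() effectively yields.
 * The 1234567890 ns sample value is hypothetical, chosen only for the demo.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t delta_ns = 1234567890ULL;	/* hypothetical idle time in ns */

	int64_t approx_us = delta_ns >> 10;	/* old code: divide by 1024 */
	int64_t exact_us  = delta_ns / 1000;	/* exact ns -> us conversion */

	printf("approx (>>10): %lld us\n", (long long)approx_us);
	printf("exact (/1000): %lld us\n", (long long)exact_us);
	return 0;
}

For this sample, the shift reports 1205632 us while the exact conversion gives 1234567 us, which is why last_residency is more accurate with ktime_us_delta().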