@@ -120,6 +120,19 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
 	return (dev && tick_broadcast_device.evtdev == dev);
 }
 
+int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
+{
+	int ret = -ENODEV;
+
+	if (tick_is_broadcast_device(dev)) {
+		raw_spin_lock(&tick_broadcast_lock);
+		ret = __clockevents_update_freq(dev, freq);
+		raw_spin_unlock(&tick_broadcast_lock);
+	}
+	return ret;
+}
+
+
 static void err_broadcast(const struct cpumask *mask)
 {
 	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
@@ -272,12 +285,8 @@ static void tick_do_broadcast(struct cpumask *mask)
  */
 static void tick_do_periodic_broadcast(void)
 {
-	raw_spin_lock(&tick_broadcast_lock);
-
 	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
 	tick_do_broadcast(tmpmask);
-
-	raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
@@ -287,13 +296,15 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
 	ktime_t next;
 
+	raw_spin_lock(&tick_broadcast_lock);
+
 	tick_do_periodic_broadcast();
 
 	/*
 	 * The device is in periodic mode. No reprogramming necessary:
 	 */
 	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
-		return;
+		goto unlock;
 
 	/*
 	 * Setup the next period for devices, which do not have
@@ -306,9 +317,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 		next = ktime_add(next, tick_period);
 
 		if (!clockevents_program_event(dev, next, false))
-			return;
+			goto unlock;
 		tick_do_periodic_broadcast();
 	}
+unlock:
+	raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*