@@ -120,6 +120,19 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
 	return (dev && tick_broadcast_device.evtdev == dev);
 }
 
+int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
+{
+	int ret = -ENODEV;
+
+	if (tick_is_broadcast_device(dev)) {
+		raw_spin_lock(&tick_broadcast_lock);
+		ret = __clockevents_update_freq(dev, freq);
+		raw_spin_unlock(&tick_broadcast_lock);
+	}
+	return ret;
+}
+
+
 static void err_broadcast(const struct cpumask *mask)
 {
 	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
@@ -272,12 +285,8 @@ static void tick_do_broadcast(struct cpumask *mask)
  */
 static void tick_do_periodic_broadcast(void)
 {
-	raw_spin_lock(&tick_broadcast_lock);
-
 	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
 	tick_do_broadcast(tmpmask);
-
-	raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
@@ -287,13 +296,15 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
 	ktime_t next;
 
+	raw_spin_lock(&tick_broadcast_lock);
+
 	tick_do_periodic_broadcast();
 
 	/*
 	 * The device is in periodic mode. No reprogramming necessary:
 	 */
 	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
-		return;
+		goto unlock;
 
 	/*
 	 * Setup the next period for devices, which do not have
@@ -306,9 +317,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 		next = ktime_add(next, tick_period);
 
 		if (!clockevents_program_event(dev, next, false))
-			return;
+			goto unlock;
 		tick_do_periodic_broadcast();
 	}
+unlock:
+	raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
@@ -630,24 +643,61 @@ again:
 	raw_spin_unlock(&tick_broadcast_lock);
 }
 
+static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
+{
+	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+		return 0;
+	if (bc->next_event.tv64 == KTIME_MAX)
+		return 0;
+	return bc->bound_on == cpu ? -EBUSY : 0;
+}
+
+static void broadcast_shutdown_local(struct clock_event_device *bc,
+				     struct clock_event_device *dev)
+{
+	/*
+	 * For hrtimer based broadcasting we cannot shutdown the cpu
+	 * local device if our own event is the first one to expire or
+	 * if we own the broadcast timer.
+	 */
+	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
+		if (broadcast_needs_cpu(bc, smp_processor_id()))
+			return;
+		if (dev->next_event.tv64 < bc->next_event.tv64)
+			return;
+	}
+	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+}
+
+static void broadcast_move_bc(int deadcpu)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+	if (!bc || !broadcast_needs_cpu(bc, deadcpu))
+		return;
+	/* This moves the broadcast assignment to this cpu */
+	clockevents_program_event(bc, bc->next_event, 1);
+}
+
 /*
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
  */
-void tick_broadcast_oneshot_control(unsigned long reason)
+int tick_broadcast_oneshot_control(unsigned long reason)
 {
 	struct clock_event_device *bc, *dev;
 	struct tick_device *td;
 	unsigned long flags;
 	ktime_t now;
-	int cpu;
+	int cpu, ret = 0;
 
 	/*
 	 * Periodic mode does not care about the enter/exit of power
 	 * states
 	 */
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-		return;
+		return 0;
 
 	/*
 	 * We are called with preemtion disabled from the depth of the
@@ -658,7 +708,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	dev = td->evtdev;
 
 	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
-		return;
+		return 0;
 
 	bc = tick_broadcast_device.evtdev;
 
@@ -666,7 +716,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
-			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+			broadcast_shutdown_local(bc, dev);
 			/*
 			 * We only reprogram the broadcast timer if we
 			 * did not mark ourself in the force mask and
@@ -679,6 +729,16 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 			    dev->next_event.tv64 < bc->next_event.tv64)
 				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
 		}
+		/*
+		 * If the current CPU owns the hrtimer broadcast
+		 * mechanism, it cannot go deep idle and we remove the
+		 * CPU from the broadcast mask. We don't have to go
+		 * through the EXIT path as the local timer is not
+		 * shutdown.
+		 */
+		ret = broadcast_needs_cpu(bc, cpu);
+		if (ret)
+			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
@@ -746,6 +806,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	}
 out:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+	return ret;
 }
 
 /*
@@ -852,6 +913,8 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
 	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
 
+	broadcast_move_bc(cpu);
+
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }