|
@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
|
|
|
static int watchdog_running;
|
|
|
static atomic_t watchdog_reset_pending;
|
|
|
|
|
|
+static inline void clocksource_watchdog_lock(unsigned long *flags)
|
|
|
+{
|
|
|
+ spin_lock_irqsave(&watchdog_lock, *flags);
|
|
|
+}
|
|
|
+
|
|
|
+static inline void clocksource_watchdog_unlock(unsigned long *flags)
|
|
|
+{
|
|
|
+ spin_unlock_irqrestore(&watchdog_lock, *flags);
|
|
|
+}
|
|
|
+
|
|
|
static int clocksource_watchdog_kthread(void *data);
|
|
|
static void __clocksource_change_rating(struct clocksource *cs, int rating);
|
|
|
|
|
@@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs)
|
|
|
cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
|
|
|
cs->flags |= CLOCK_SOURCE_UNSTABLE;
|
|
|
|
|
|
+ /*
|
|
|
+ * If the clocksource is registered clocksource_watchdog_kthread() will
|
|
|
+ * re-rate and re-select.
|
|
|
+ */
|
|
|
+ if (list_empty(&cs->list)) {
|
|
|
+ cs->rating = 0;
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
if (cs->mark_unstable)
|
|
|
cs->mark_unstable(cs);
|
|
|
|
|
|
+ /* kick clocksource_watchdog_kthread() */
|
|
|
if (finished_booting)
|
|
|
schedule_work(&watchdog_work);
|
|
|
}
|
|
@@ -153,10 +173,8 @@ static void __clocksource_unstable(struct clocksource *cs)
|
|
|
* clocksource_mark_unstable - mark clocksource unstable via watchdog
|
|
|
* @cs: clocksource to be marked unstable
|
|
|
*
|
|
|
- * This function is called instead of clocksource_change_rating from
|
|
|
- * cpu hotplug code to avoid a deadlock between the clocksource mutex
|
|
|
- * and the cpu hotplug mutex. It defers the update of the clocksource
|
|
|
- * to the watchdog thread.
|
|
|
+ * This function is called by the x86 TSC code to mark clocksources as unstable;
|
|
|
+ * it defers demotion and re-selection to a kthread.
|
|
|
*/
|
|
|
void clocksource_mark_unstable(struct clocksource *cs)
|
|
|
{
|
|
@@ -164,7 +182,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
|
|
|
|
|
|
spin_lock_irqsave(&watchdog_lock, flags);
|
|
|
if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
|
|
|
- if (list_empty(&cs->wd_list))
|
|
|
+ if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
|
|
|
list_add(&cs->wd_list, &watchdog_list);
|
|
|
__clocksource_unstable(cs);
|
|
|
}
|
|
@@ -319,9 +337,8 @@ static void clocksource_resume_watchdog(void)
|
|
|
|
|
|
static void clocksource_enqueue_watchdog(struct clocksource *cs)
|
|
|
{
|
|
|
- unsigned long flags;
|
|
|
+ INIT_LIST_HEAD(&cs->wd_list);
|
|
|
|
|
|
- spin_lock_irqsave(&watchdog_lock, flags);
|
|
|
if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
|
|
|
/* cs is a clocksource to be watched. */
|
|
|
list_add(&cs->wd_list, &watchdog_list);
|
|
@@ -331,7 +348,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
|
|
|
if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
|
|
|
cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
|
|
|
}
|
|
|
- spin_unlock_irqrestore(&watchdog_lock, flags);
|
|
|
}
|
|
|
|
|
|
static void clocksource_select_watchdog(bool fallback)
|
|
@@ -373,9 +389,6 @@ static void clocksource_select_watchdog(bool fallback)
|
|
|
|
|
|
static void clocksource_dequeue_watchdog(struct clocksource *cs)
|
|
|
{
|
|
|
- unsigned long flags;
|
|
|
-
|
|
|
- spin_lock_irqsave(&watchdog_lock, flags);
|
|
|
if (cs != watchdog) {
|
|
|
if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
|
|
|
/* cs is a watched clocksource. */
|
|
@@ -384,21 +397,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
|
|
|
clocksource_stop_watchdog();
|
|
|
}
|
|
|
}
|
|
|
- spin_unlock_irqrestore(&watchdog_lock, flags);
|
|
|
}
|
|
|
|
|
|
static int __clocksource_watchdog_kthread(void)
|
|
|
{
|
|
|
struct clocksource *cs, *tmp;
|
|
|
unsigned long flags;
|
|
|
- LIST_HEAD(unstable);
|
|
|
int select = 0;
|
|
|
|
|
|
spin_lock_irqsave(&watchdog_lock, flags);
|
|
|
list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
|
|
|
if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
|
|
|
list_del_init(&cs->wd_list);
|
|
|
- list_add(&cs->wd_list, &unstable);
|
|
|
+ __clocksource_change_rating(cs, 0);
|
|
|
select = 1;
|
|
|
}
|
|
|
if (cs->flags & CLOCK_SOURCE_RESELECT) {
|
|
@@ -410,11 +421,6 @@ static int __clocksource_watchdog_kthread(void)
|
|
|
clocksource_stop_watchdog();
|
|
|
spin_unlock_irqrestore(&watchdog_lock, flags);
|
|
|
|
|
|
- /* Needs to be done outside of watchdog lock */
|
|
|
- list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
|
|
|
- list_del_init(&cs->wd_list);
|
|
|
- __clocksource_change_rating(cs, 0);
|
|
|
- }
|
|
|
return select;
|
|
|
}
|
|
|
|
|
@@ -447,6 +453,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
|
|
|
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
|
|
|
void clocksource_mark_unstable(struct clocksource *cs) { }
|
|
|
|
|
|
+static inline void clocksource_watchdog_lock(unsigned long *flags) { }
|
|
|
+static inline void clocksource_watchdog_unlock(unsigned long *flags) { }
|
|
|
+
|
|
|
#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
|
|
|
|
|
|
/**
|
|
@@ -779,14 +788,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
|
|
|
*/
|
|
|
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
|
|
|
{
|
|
|
+ unsigned long flags;
|
|
|
|
|
|
/* Initialize mult/shift and max_idle_ns */
|
|
|
__clocksource_update_freq_scale(cs, scale, freq);
|
|
|
|
|
|
/* Add clocksource to the clocksource list */
|
|
|
mutex_lock(&clocksource_mutex);
|
|
|
+
|
|
|
+ clocksource_watchdog_lock(&flags);
|
|
|
clocksource_enqueue(cs);
|
|
|
clocksource_enqueue_watchdog(cs);
|
|
|
+ clocksource_watchdog_unlock(&flags);
|
|
|
+
|
|
|
clocksource_select();
|
|
|
clocksource_select_watchdog(false);
|
|
|
mutex_unlock(&clocksource_mutex);
|
|
@@ -808,8 +822,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
|
|
|
*/
|
|
|
void clocksource_change_rating(struct clocksource *cs, int rating)
|
|
|
{
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
mutex_lock(&clocksource_mutex);
|
|
|
+ clocksource_watchdog_lock(&flags);
|
|
|
__clocksource_change_rating(cs, rating);
|
|
|
+ clocksource_watchdog_unlock(&flags);
|
|
|
+
|
|
|
clocksource_select();
|
|
|
clocksource_select_watchdog(false);
|
|
|
mutex_unlock(&clocksource_mutex);
|
|
@@ -821,6 +840,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
|
|
|
*/
|
|
|
static int clocksource_unbind(struct clocksource *cs)
|
|
|
{
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
if (clocksource_is_watchdog(cs)) {
|
|
|
/* Select and try to install a replacement watchdog. */
|
|
|
clocksource_select_watchdog(true);
|
|
@@ -834,8 +855,12 @@ static int clocksource_unbind(struct clocksource *cs)
|
|
|
if (curr_clocksource == cs)
|
|
|
return -EBUSY;
|
|
|
}
|
|
|
+
|
|
|
+ clocksource_watchdog_lock(&flags);
|
|
|
clocksource_dequeue_watchdog(cs);
|
|
|
list_del_init(&cs->list);
|
|
|
+ clocksource_watchdog_unlock(&flags);
|
|
|
+
|
|
|
return 0;
|
|
|
}
|
|
|
|