Browse code

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timekeeping fixes from Thomas Gleixner:
 "Two fixes for timekeeping:

   - Revert to the previous kthread based update, which is unfortunately
     required due to lock ordering issues. The removal caused boot
     failures on old Core2 machines. Add a proper comment why the thread
     needs to stay to prevent accidental removal in the future.

   - Fix a silly typo in a function declaration"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  clocksource: Revert "Remove kthread"
  timekeeping: Fix declaration of read_persistent_wall_and_boot_offset()
Linus Torvalds 7 years ago
Parent
Current commit
3567994a05
2 files changed, with 32 insertions and 12 deletions
  1. 2 2
      include/linux/timekeeping.h
  2. 30 10
      kernel/time/clocksource.c

+ 2 - 2
include/linux/timekeeping.h

@@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
 extern int persistent_clock_is_local;
 extern int persistent_clock_is_local;
 
 
 extern void read_persistent_clock64(struct timespec64 *ts);
 extern void read_persistent_clock64(struct timespec64 *ts);
-void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
-					   struct timespec64 *boot_offset);
+void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
+					  struct timespec64 *boot_offset);
 extern int update_persistent_clock64(struct timespec64 now);
 extern int update_persistent_clock64(struct timespec64 now);
 
 
 /*
 /*

+ 30 - 10
kernel/time/clocksource.c

@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
 	spin_unlock_irqrestore(&watchdog_lock, *flags);
 	spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 }
 
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  * Interval: 0.5sec Threshold: 0.0625s
  */
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+	/*
+	 * We cannot directly run clocksource_watchdog_kthread() here, because
+	 * clocksource_select() calls timekeeping_notify() which uses
+	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
+	 * to lock inversions wrt CPU hotplug.
+	 *
+	 * Also, we only ever run this work once or twice during the lifetime
+	 * of the kernel, so there is no point in creating a more permanent
+	 * kthread for this.
+	 *
+	 * If kthread_run fails the next watchdog scan over the
+	 * watchdog_list will find the unstable clock again.
+	 */
+	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 static void __clocksource_unstable(struct clocksource *cs)
 {
 {
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
 
 	/*
 	/*
-	 * If the clocksource is registered clocksource_watchdog_work() will
+	 * If the clocksource is registered clocksource_watchdog_kthread() will
 	 * re-rate and re-select.
 	 * re-rate and re-select.
 	 */
 	 */
 	if (list_empty(&cs->list)) {
 	if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
 	if (cs->mark_unstable)
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 		cs->mark_unstable(cs);
 
 
-	/* kick clocksource_watchdog_work() */
+	/* kick clocksource_watchdog_kthread() */
 	if (finished_booting)
 	if (finished_booting)
 		schedule_work(&watchdog_work);
 		schedule_work(&watchdog_work);
 }
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs:		clocksource to be marked unstable
  * @cs:		clocksource to be marked unstable
  *
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 void clocksource_mark_unstable(struct clocksource *cs)
 {
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 	}
 	}
 }
 }
 
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
 {
 	struct clocksource *cs, *tmp;
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
 	unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
 	return select;
 	return select;
 }
 }
 
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
 {
 	mutex_lock(&clocksource_mutex);
 	mutex_lock(&clocksource_mutex);
-	if (__clocksource_watchdog_work())
+	if (__clocksource_watchdog_kthread())
 		clocksource_select();
 		clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	mutex_unlock(&clocksource_mutex);
+	return 0;
 }
 }
 
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
 static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
 	/*
 	/*
 	 * Run the watchdog first to eliminate unstable clock sources
 	 * Run the watchdog first to eliminate unstable clock sources
 	 */
 	 */
-	__clocksource_watchdog_work();
+	__clocksource_watchdog_kthread();
 	clocksource_select();
 	clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 	return 0;