@@ -92,24 +92,25 @@ static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
 /* Functions below help us manage 'deferrable' flag */
 static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
 {
-	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
+	return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
 }
 
-static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
+static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
 {
-	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
+	return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
 }
 
-static inline void timer_set_deferrable(struct timer_list *timer)
+static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
 {
-	timer->base = TBASE_MAKE_DEFERRED(timer->base);
+	return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
 }
 
 static inline void
 timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
 {
-	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
-				      tbase_get_deferrable(timer->base));
+	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;
+
+	timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
 }
 
 static unsigned long round_jiffies_common(unsigned long j, int cpu,
@@ -563,16 +564,14 @@ static inline void debug_timer_assert_init(struct timer_list *timer)
 	debug_object_assert_init(timer, &timer_debug_descr);
 }
 
-static void __init_timer(struct timer_list *timer,
-			 const char *name,
-			 struct lock_class_key *key);
+static void do_init_timer(struct timer_list *timer, unsigned int flags,
+			  const char *name, struct lock_class_key *key);
 
-void init_timer_on_stack_key(struct timer_list *timer,
-			     const char *name,
-			     struct lock_class_key *key)
+void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
+			     const char *name, struct lock_class_key *key)
 {
 	debug_object_init_on_stack(timer, &timer_debug_descr);
-	__init_timer(timer, name, key);
+	do_init_timer(timer, flags, name, key);
 }
 EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
 
@@ -613,12 +612,13 @@ static inline void debug_assert_init(struct timer_list *timer)
 	debug_timer_assert_init(timer);
 }
 
-static void __init_timer(struct timer_list *timer,
-			 const char *name,
-			 struct lock_class_key *key)
+static void do_init_timer(struct timer_list *timer, unsigned int flags,
+			  const char *name, struct lock_class_key *key)
 {
+	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
+
 	timer->entry.next = NULL;
-	timer->base = __raw_get_cpu_var(tvec_bases);
+	timer->base = (void *)((unsigned long)base | flags);
 	timer->slack = -1;
 #ifdef CONFIG_TIMER_STATS
 	timer->start_site = NULL;
@@ -628,22 +628,10 @@ static void __init_timer(struct timer_list *timer,
 	lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }
 
-void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
-					 const char *name,
-					 struct lock_class_key *key,
-					 void (*function)(unsigned long),
-					 unsigned long data)
-{
-	timer->function = function;
-	timer->data = data;
-	init_timer_on_stack_key(timer, name, key);
-	timer_set_deferrable(timer);
-}
-EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);
-
 /**
  * init_timer_key - initialize a timer
  * @timer: the timer to be initialized
+ * @flags: timer flags
  * @name: name of the timer
  * @key: lockdep class key of the fake lock used for tracking timer
  *       sync lock dependencies
@@ -651,24 +639,14 @@ EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);
  * init_timer_key() must be done to a timer prior calling *any* of the
  * other timer functions.
  */
-void init_timer_key(struct timer_list *timer,
-		    const char *name,
-		    struct lock_class_key *key)
+void init_timer_key(struct timer_list *timer, unsigned int flags,
+		    const char *name, struct lock_class_key *key)
 {
 	debug_init(timer);
-	__init_timer(timer, name, key);
+	do_init_timer(timer, flags, name, key);
 }
 EXPORT_SYMBOL(init_timer_key);
 
-void init_timer_deferrable_key(struct timer_list *timer,
-			       const char *name,
-			       struct lock_class_key *key)
-{
-	init_timer_key(timer, name, key);
-	timer_set_deferrable(timer);
-}
-EXPORT_SYMBOL(init_timer_deferrable_key);
-
 static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 {
 	struct list_head *entry = &timer->entry;
@@ -686,7 +664,7 @@ detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
 {
 	detach_timer(timer, true);
 	if (!tbase_get_deferrable(timer->base))
-		timer->base->active_timers--;
+		base->active_timers--;
 }
 
 static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
@@ -697,7 +675,7 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
 
 	detach_timer(timer, clear_pending);
 	if (!tbase_get_deferrable(timer->base)) {
-		timer->base->active_timers--;
+		base->active_timers--;
 		if (timer->expires == base->next_timer)
 			base->next_timer = base->timer_jiffies;
 	}
@@ -1029,14 +1007,14 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  *
  * Synchronization rules: Callers must prevent restarting of the timer,
  * otherwise this function is meaningless. It must not be called from
- * interrupt contexts. The caller must not hold locks which would prevent
- * completion of the timer's handler. The timer's handler must not call
- * add_timer_on(). Upon exit the timer is not queued and the handler is
- * not running on any CPU.
+ * interrupt contexts unless the timer is an irqsafe one. The caller must
+ * not hold locks which would prevent completion of the timer's
+ * handler. The timer's handler must not call add_timer_on(). Upon exit the
+ * timer is not queued and the handler is not running on any CPU.
  *
- * Note: You must not hold locks that are held in interrupt context
- * while calling this function. Even if the lock has nothing to do
- * with the timer in question. Here's why:
+ * Note: For !irqsafe timers, you must not hold locks that are held in
+ * interrupt context while calling this function. Even if the lock has
+ * nothing to do with the timer in question. Here's why:
  *
  *    CPU0                             CPU1
  *    ----                             ----
@@ -1073,7 +1051,7 @@ int del_timer_sync(struct timer_list *timer)
 	 * don't use it in hardirq context, because it
 	 * could lead to deadlock.
 	 */
-	WARN_ON(in_irq());
+	WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
 	for (;;) {
 		int ret = try_to_del_timer_sync(timer);
 		if (ret >= 0)
@@ -1180,19 +1158,27 @@ static inline void __run_timers(struct tvec_base *base)
 		while (!list_empty(head)) {
 			void (*fn)(unsigned long);
 			unsigned long data;
+			bool irqsafe;
 
 			timer = list_first_entry(head, struct timer_list,entry);
 			fn = timer->function;
 			data = timer->data;
+			irqsafe = tbase_get_irqsafe(timer->base);
 
 			timer_stats_account_timer(timer);
 
 			base->running_timer = timer;
 			detach_expired_timer(timer, base);
 
-			spin_unlock_irq(&base->lock);
-			call_timer_fn(timer, fn, data);
-			spin_lock_irq(&base->lock);
+			if (irqsafe) {
+				spin_unlock(&base->lock);
+				call_timer_fn(timer, fn, data);
+				spin_lock(&base->lock);
+			} else {
+				spin_unlock_irq(&base->lock);
+				call_timer_fn(timer, fn, data);
+				spin_lock_irq(&base->lock);
+			}
 		}
 	}
 	base->running_timer = NULL;
@@ -1800,9 +1786,13 @@ static struct notifier_block __cpuinitdata timers_nb = {
 
 void __init init_timers(void)
 {
-	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
-				(void *)(long)smp_processor_id());
+	int err;
+
+	/* ensure there are enough low bits for flags in timer->base pointer */
+	BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
+
+	err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
+			       (void *)(long)smp_processor_id());
 	init_timer_stats();
 
 	BUG_ON(err != NOTIFY_OK);