@@ -186,22 +186,24 @@ static inline void gov_cancel_timers(struct cpufreq_policy *policy)
 
 void gov_cancel_work(struct cpu_common_dbs_info *shared)
 {
-	unsigned long flags;
-
+	/* Tell dbs_timer_handler() to skip queuing up work items. */
+	atomic_inc(&shared->skip_work);
 	/*
-	 * No work will be queued from timer handlers after skip_work is
-	 * updated. And so we can safely cancel the work first and then the
-	 * timers.
+	 * If dbs_timer_handler() is already running, it may not notice the
+	 * incremented skip_work, so wait for it to complete to prevent its work
+	 * item from being queued up after the cancel_work_sync() below.
+	 */
+	gov_cancel_timers(shared->policy);
+	/*
+	 * In case dbs_timer_handler() managed to run and spawn a work item
+	 * before the timers have been canceled, wait for that work item to
+	 * complete and then cancel all of the timers set up by it. If
+	 * dbs_timer_handler() runs again at that point, it will see the
+	 * positive value of skip_work and won't spawn any more work items.
 	 */
-	spin_lock_irqsave(&shared->timer_lock, flags);
-	shared->skip_work++;
-	spin_unlock_irqrestore(&shared->timer_lock, flags);
-
 	cancel_work_sync(&shared->work);
-
 	gov_cancel_timers(shared->policy);
-
-	shared->skip_work = 0;
+	atomic_set(&shared->skip_work, 0);
 }
 EXPORT_SYMBOL_GPL(gov_cancel_work);
 
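The teardown ordering above matters more than the individual calls, so here is a minimal userspace sketch of it, assuming C11 <stdatomic.h> and stub helpers; cancel_timers() and flush_work_item() merely stand in for gov_cancel_timers() and cancel_work_sync(), none of this is the kernel API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int skip_work;	/* stand-in for shared->skip_work */

static void cancel_timers(void)   { puts("timers canceled"); }   /* ~ gov_cancel_timers() */
static void flush_work_item(void) { puts("work item flushed"); } /* ~ cancel_work_sync() */

static void stop_sampling(void)
{
	/* Tell timer handlers to back off from now on. */
	atomic_fetch_add(&skip_work, 1);

	/* A handler that was already running may not have seen the raised
	 * count, so wait for the timers to be gone before flushing work. */
	cancel_timers();

	/* Flush a work item that slipped in before the timers were gone... */
	flush_work_item();

	/* ...and cancel the timers that work item re-armed; a handler that
	 * fires now sees the positive count and does nothing. */
	cancel_timers();

	/* Teardown done; allow queuing again for the next governor start. */
	atomic_store(&skip_work, 0);
}

int main(void)
{
	stop_sampling();
	return 0;
}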
@@ -230,7 +232,6 @@ static void dbs_work_handler(struct work_struct *work)
 	struct cpufreq_policy *policy;
 	struct dbs_data *dbs_data;
 	unsigned int sampling_rate, delay;
-	unsigned long flags;
 	bool eval_load;
 
 	policy = shared->policy;
@@ -259,9 +260,7 @@ static void dbs_work_handler(struct work_struct *work)
 	delay = dbs_data->cdata->gov_dbs_timer(policy, eval_load);
 	mutex_unlock(&shared->timer_mutex);
 
-	spin_lock_irqsave(&shared->timer_lock, flags);
-	shared->skip_work--;
-	spin_unlock_irqrestore(&shared->timer_lock, flags);
+	atomic_dec(&shared->skip_work);
 
 	gov_add_timers(policy, delay);
 }
@@ -270,22 +269,18 @@ static void dbs_timer_handler(unsigned long data)
 {
 	struct cpu_dbs_info *cdbs = (struct cpu_dbs_info *)data;
 	struct cpu_common_dbs_info *shared = cdbs->shared;
-	unsigned long flags;
-
-	spin_lock_irqsave(&shared->timer_lock, flags);
 
 	/*
-	 * Timer handler isn't allowed to queue work at the moment, because:
+	 * Timer handler may not be allowed to queue the work at the moment,
+	 * because:
 	 * - Another timer handler has done that
 	 * - We are stopping the governor
-	 * - Or we are updating the sampling rate of ondemand governor
+	 * - Or we are updating the sampling rate of the ondemand governor
 	 */
-	if (!shared->skip_work) {
-		shared->skip_work++;
+	if (atomic_inc_return(&shared->skip_work) > 1)
+		atomic_dec(&shared->skip_work);
+	else
 		queue_work(system_wq, &shared->work);
-	}
-
-	spin_unlock_irqrestore(&shared->timer_lock, flags);
 }
 
 static void set_sampling_rate(struct dbs_data *dbs_data,
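The conditional in dbs_timer_handler() is the core of the conversion: the counter doubles as a "work already in flight or governor stopping?" flag, which is what lets the spinlock go away. Below is a minimal userspace illustration of that gating, using C11 <stdatomic.h> rather than the kernel's atomic API; the names skip_work and try_queue_work mirror the patch but are not kernel symbols:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int skip_work;	/* 0: no work queued and not shutting down */

static bool try_queue_work(void)
{
	/* atomic_fetch_add() returns the old value, so old == 0 is the same
	 * test as atomic_inc_return() == 1 in the patch: we are the first
	 * to raise the count and get to queue the work item. */
	if (atomic_fetch_add(&skip_work, 1) == 0)
		return true;

	/* A work item is already in flight (or teardown raised the count);
	 * undo our increment and back off. */
	atomic_fetch_sub(&skip_work, 1);
	return false;
}

int main(void)
{
	printf("first timer fires:    queued=%d\n", try_queue_work());	/* queued=1 */
	printf("second timer fires:   queued=%d\n", try_queue_work());	/* queued=0 */

	/* The work handler drops the count when it finishes (the
	 * atomic_dec() in dbs_work_handler()), re-enabling queuing. */
	atomic_fetch_sub(&skip_work, 1);
	printf("after work completes: queued=%d\n", try_queue_work());	/* queued=1 */
	return 0;
}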
@@ -316,7 +311,7 @@ static int alloc_common_dbs_info(struct cpufreq_policy *policy,
 	cdata->get_cpu_cdbs(j)->shared = shared;
 
 	mutex_init(&shared->timer_mutex);
-	spin_lock_init(&shared->timer_lock);
+	atomic_set(&shared->skip_work, 0);
 	INIT_WORK(&shared->work, dbs_work_handler);
 	return 0;
 }