cpufreq: governor: Use lockless timer function
It is possible to get rid of the timer_lock spinlock used by the governor timer function for synchronization, but a couple of races need to be avoided.

The first race is between multiple dbs_timer_handler() instances that may be running in parallel with each other on different CPUs. Namely, one of them has to queue up the work item, but it must not be queued up more than once. To achieve that, atomic_inc_return() can be used on the skip_work field of struct cpu_common_dbs_info.

The second race is between an already running dbs_timer_handler() and gov_cancel_work(). In that case dbs_timer_handler() might not notice the skip_work incrementation in gov_cancel_work() and might queue up its work item after gov_cancel_work() had returned (and that work item would corrupt skip_work going forward). To prevent that from happening, gov_cancel_work() can be made to wait for the timer function to complete (on all CPUs) right after skip_work has been incremented.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
commit 2dd3e724b4 (parent f08f638b9c)
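For illustration only (not part of the commit): a minimal user-space sketch of the queue-once pattern that resolves the first race described above. It substitutes C11 <stdatomic.h> for the kernel's atomic_t API, and try_queue_work() is a hypothetical stand-in for the queuing logic in dbs_timer_handler(); atomic_fetch_add() returns the old value, so "old + 1" plays the role of atomic_inc_return().

#include <stdatomic.h>
#include <stdio.h>

static atomic_int skip_work;

/*
 * Sketch of dbs_timer_handler()'s queue-once logic: of all the
 * handlers racing on different CPUs, only the one that takes
 * skip_work from 0 to 1 queues the work item; every other caller
 * undoes its increment and backs off.
 */
static int try_queue_work(void)
{
        if (atomic_fetch_add(&skip_work, 1) + 1 > 1) {
                atomic_fetch_sub(&skip_work, 1);  /* lost the race: back off */
                return 0;
        }
        return 1;  /* won the race: queue_work() would go here */
}

int main(void)
{
        printf("first caller queues the work: %d\n", try_queue_work());
        printf("second caller backs off: %d\n", try_queue_work());
        return 0;
}

The worker is then expected to decrement skip_work when it finishes, mirroring the atomic_dec() added to dbs_work_handler() in the diff below.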
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -186,22 +186,24 @@ static inline void gov_cancel_timers(struct cpufreq_policy *policy)
 
 void gov_cancel_work(struct cpu_common_dbs_info *shared)
 {
-        unsigned long flags;
-
+        /* Tell dbs_timer_handler() to skip queuing up work items. */
+        atomic_inc(&shared->skip_work);
         /*
-         * No work will be queued from timer handlers after skip_work is
-         * updated. And so we can safely cancel the work first and then the
-         * timers.
+         * If dbs_timer_handler() is already running, it may not notice the
+         * incremented skip_work, so wait for it to complete to prevent its work
+         * item from being queued up after the cancel_work_sync() below.
          */
-        spin_lock_irqsave(&shared->timer_lock, flags);
-        shared->skip_work++;
-        spin_unlock_irqrestore(&shared->timer_lock, flags);
-
-        cancel_work_sync(&shared->work);
-
         gov_cancel_timers(shared->policy);
-
-        shared->skip_work = 0;
+        /*
+         * In case dbs_timer_handler() managed to run and spawn a work item
+         * before the timers have been canceled, wait for that work item to
+         * complete and then cancel all of the timers set up by it. If
+         * dbs_timer_handler() runs again at that point, it will see the
+         * positive value of skip_work and won't spawn any more work items.
+         */
+        cancel_work_sync(&shared->work);
+        gov_cancel_timers(shared->policy);
+        atomic_set(&shared->skip_work, 0);
 }
 EXPORT_SYMBOL_GPL(gov_cancel_work);
 
@@ -230,7 +232,6 @@ static void dbs_work_handler(struct work_struct *work)
         struct cpufreq_policy *policy;
         struct dbs_data *dbs_data;
         unsigned int sampling_rate, delay;
-        unsigned long flags;
         bool eval_load;
 
         policy = shared->policy;
@@ -259,9 +260,7 @@ static void dbs_work_handler(struct work_struct *work)
         delay = dbs_data->cdata->gov_dbs_timer(policy, eval_load);
         mutex_unlock(&shared->timer_mutex);
 
-        spin_lock_irqsave(&shared->timer_lock, flags);
-        shared->skip_work--;
-        spin_unlock_irqrestore(&shared->timer_lock, flags);
+        atomic_dec(&shared->skip_work);
 
         gov_add_timers(policy, delay);
 }
@@ -270,24 +269,20 @@ static void dbs_timer_handler(unsigned long data)
 {
         struct cpu_dbs_info *cdbs = (struct cpu_dbs_info *)data;
         struct cpu_common_dbs_info *shared = cdbs->shared;
-        unsigned long flags;
-
-        spin_lock_irqsave(&shared->timer_lock, flags);
 
         /*
-         * Timer handler isn't allowed to queue work at the moment, because:
+         * Timer handler may not be allowed to queue the work at the moment,
+         * because:
          * - Another timer handler has done that
          * - We are stopping the governor
-         * - Or we are updating the sampling rate of ondemand governor
+         * - Or we are updating the sampling rate of the ondemand governor
          */
-        if (!shared->skip_work) {
-                shared->skip_work++;
+        if (atomic_inc_return(&shared->skip_work) > 1)
+                atomic_dec(&shared->skip_work);
+        else
                 queue_work(system_wq, &shared->work);
-        }
-
-        spin_unlock_irqrestore(&shared->timer_lock, flags);
 }
 
 static void set_sampling_rate(struct dbs_data *dbs_data,
                               unsigned int sampling_rate)
 {
@@ -316,7 +311,7 @@ static int alloc_common_dbs_info(struct cpufreq_policy *policy,
         cdata->get_cpu_cdbs(j)->shared = shared;
 
         mutex_init(&shared->timer_mutex);
-        spin_lock_init(&shared->timer_lock);
+        atomic_set(&shared->skip_work, 0);
         INIT_WORK(&shared->work, dbs_work_handler);
         return 0;
 }
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -17,6 +17,7 @@
 #ifndef _CPUFREQ_GOVERNOR_H
 #define _CPUFREQ_GOVERNOR_H
 
+#include <linux/atomic.h>
 #include <linux/cpufreq.h>
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
@@ -137,14 +138,8 @@ struct cpu_common_dbs_info {
          */
         struct mutex timer_mutex;
 
-        /*
-         * Per policy lock that serializes access to queuing work from timer
-         * handlers.
-         */
-        spinlock_t timer_lock;
-
         ktime_t time_stamp;
-        unsigned int skip_work;
+        atomic_t skip_work;
         struct work_struct work;
 };