thermal/x86_pkg_temp: Make pkg_temp_lock a raw_spinlock_t
The spinlock pkg_temp_lock has the potential of being taken in atomic
context because it can be acquired from the thermal IRQ vector. It's
static and of limited scope, so go ahead and make it a raw spinlock.

Signed-off-by: Clark Williams <williams@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Zhang Rui <rui.zhang@intel.com>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Link: https://lore.kernel.org/r/20191008110021.2j44ayunal7fkb7i@linutronix.de
parent fd96a316d2
commit afa58b49ac
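For context, here is a minimal, self-contained sketch of the locking pattern the patch switches to. It is not the driver code; the names example_lock, example_interrupt_cnt, example_thermal_notify() and example_work_fn() are hypothetical stand-ins for pkg_temp_lock, pkg_interrupt_cnt and the driver's notify/work functions. The point is that a raw_spinlock_t remains a spinning lock even on PREEMPT_RT, where a plain spinlock_t becomes a sleeping lock and therefore must not be taken from a hard interrupt vector.

#include <linux/spinlock.h>

/*
 * Hypothetical lock and counter standing in for pkg_temp_lock and
 * pkg_interrupt_cnt. Declared raw so it stays a true spinning lock
 * and can be taken from hard IRQ context on PREEMPT_RT as well.
 */
static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned int example_interrupt_cnt;

/* Runs from the thermal interrupt vector, i.e. atomic context. */
static void example_thermal_notify(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_interrupt_cnt++;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

/*
 * Runs from process context (e.g. a work function), so the plain
 * irq-disabling variant without flag saving is sufficient.
 */
static void example_work_fn(void)
{
	raw_spin_lock_irq(&example_lock);
	/* ... touch state shared with the notify path ... */
	raw_spin_unlock_irq(&example_lock);
}

The diff below applies exactly this substitution: DEFINE_SPINLOCK() becomes DEFINE_RAW_SPINLOCK(), and each spin_lock_irq()/spin_lock_irqsave() call and its matching unlock become the raw_ counterparts, with no change to the locking order.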
@@ -63,7 +63,7 @@ static int max_id __read_mostly;
 /* Array of zone pointers */
 static struct zone_device **zones;
 /* Serializes interrupt notification, work and hotplug */
-static DEFINE_SPINLOCK(pkg_temp_lock);
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
 /* Protects zone operation in the work function against hotplug removal */
 static DEFINE_MUTEX(thermal_zone_mutex);
 
@@ -266,12 +266,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	u64 msr_val, wr_val;
 
 	mutex_lock(&thermal_zone_mutex);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	++pkg_work_cnt;
 
 	zonedev = pkg_temp_thermal_get_dev(cpu);
 	if (!zonedev) {
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		mutex_unlock(&thermal_zone_mutex);
 		return;
 	}
@@ -285,7 +285,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	}
 
 	enable_pkg_thres_interrupt();
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/*
 	 * If tzone is not NULL, then thermal_zone_mutex will prevent the
@@ -310,7 +310,7 @@ static int pkg_thermal_notify(u64 msr_val)
 	struct zone_device *zonedev;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pkg_temp_lock, flags);
+	raw_spin_lock_irqsave(&pkg_temp_lock, flags);
 	++pkg_interrupt_cnt;
 
 	disable_pkg_thres_interrupt();
@@ -322,7 +322,7 @@ static int pkg_thermal_notify(u64 msr_val)
 		pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
 	}
 
-	spin_unlock_irqrestore(&pkg_temp_lock, flags);
+	raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
 	return 0;
 }
 
@@ -368,9 +368,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
 			zonedev->msr_pkg_therm_high);
 
 	cpumask_set_cpu(cpu, &zonedev->cpumask);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	zones[id] = zonedev;
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	return 0;
 }
 
@@ -407,7 +407,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 	}
 
 	/* Protect against work and interrupts */
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 
 	/*
 	 * Check whether this cpu was the current target and store the new
@@ -439,9 +439,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 		 * To cancel the work we need to drop the lock, otherwise
 		 * we might deadlock if the work needs to be flushed.
 		 */
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		cancel_delayed_work_sync(&zonedev->work);
-		spin_lock_irq(&pkg_temp_lock);
+		raw_spin_lock_irq(&pkg_temp_lock);
 		/*
 		 * If this is not the last cpu in the package and the work
 		 * did not run after we dropped the lock above, then we
@@ -452,7 +452,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 		pkg_thermal_schedule_work(target, &zonedev->work);
 	}
 
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/* Final cleanup if this is the last cpu */
 	if (lastcpu)
@@ -478,7 +478,8 @@ static int stm_thermal_resume(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume);
+static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops,
+			 stm_thermal_suspend, stm_thermal_resume);
 
 static const struct thermal_zone_of_device_ops stm_tz_ops = {
 	.get_temp = stm_thermal_get_temp,