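Replace the separately allocated pkg_work_scheduled array, which is indexed by
physical package id, with a work_scheduled flag embedded in struct pkg_device.
That removes the max_phy_id tracking and the krealloc() under pkg_temp_lock in
the hotplug path, along with the kfree() calls on the init error path and in
module exit.
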
@@ -61,6 +61,7 @@ struct pkg_device {
 	struct list_head list;
 	u16 phys_proc_id;
 	u16 cpu;
+	bool work_scheduled;
 	u32 tj_max;
 	u32 msr_pkg_therm_low;
 	u32 msr_pkg_therm_high;
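
Embedding the flag in the per-package structure ties its lifetime to the
pkg_device itself; that is what lets the next hunk drop the separately
allocated array and its max_phy_id bookkeeping.
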
@@ -82,11 +83,6 @@ static DEFINE_MUTEX(thermal_zone_mutex);
 /* Interrupt to work function schedule queue */
 static DEFINE_PER_CPU(struct delayed_work, pkg_temp_thermal_threshold_work);
 
-/* To track if the work is already scheduled on a package */
-static u8 *pkg_work_scheduled;
-
-static u16 max_phy_id;
-
 /* Debug counters to show using debugfs */
 static struct dentry *debugfs;
 static unsigned int pkg_interrupt_cnt;
@@ -294,7 +290,7 @@ static inline void disable_pkg_thres_interrupt(void)
 static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 {
 	struct thermal_zone_device *tzone = NULL;
-	int phy_id, cpu = smp_processor_id();
+	int cpu = smp_processor_id();
 	struct pkg_device *pkgdev;
 	u64 msr_val, wr_val;
 
@@ -308,8 +304,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 		mutex_unlock(&thermal_zone_mutex);
 		return;
 	}
-
-	pkg_work_scheduled[phy_id] = 0;
+	pkgdev->work_scheduled = false;
 
 	rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
 	wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1);
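
With phy_id gone, the work function clears the pending flag through the
pkgdev pointer it just looked up. The early return kept in context covers the
no-device case, so the dereference is safe, and clearing the flag before
THERM_STATUS is processed allows a later notification to schedule the work
again.
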
@@ -334,7 +329,6 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 static int pkg_thermal_notify(u64 msr_val)
 {
 	int cpu = smp_processor_id();
-	int phy_id = topology_physical_package_id(cpu);
 	struct pkg_device *pkgdev;
 	unsigned long flags;
 
@@ -345,8 +339,8 @@ static int pkg_thermal_notify(u64 msr_val)
 
 	/* Work is per package, so scheduling it once is enough. */
 	pkgdev = pkg_temp_thermal_get_dev(cpu);
-	if (pkgdev && pkg_work_scheduled && !pkg_work_scheduled[phy_id]) {
-		pkg_work_scheduled[phy_id] = 1;
+	if (pkgdev && !pkgdev->work_scheduled) {
+		pkgdev->work_scheduled = true;
 		schedule_delayed_work_on(cpu,
 			&per_cpu(pkg_temp_thermal_threshold_work, cpu),
 			msecs_to_jiffies(notify_delay_ms));
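
For orientation, here is a sketch of the whole notify path as it plausibly
reads after this hunk. The pkg_temp_lock locking, the pkg_interrupt_cnt
increment, and the disable_pkg_thres_interrupt() call are assumptions about
surrounding lines the hunk does not show (all three identifiers appear
elsewhere in this patch):

static int pkg_thermal_notify(u64 msr_val)
{
	int cpu = smp_processor_id();
	struct pkg_device *pkgdev;
	unsigned long flags;

	spin_lock_irqsave(&pkg_temp_lock, flags);	/* assumed context */
	++pkg_interrupt_cnt;				/* assumed context */
	disable_pkg_thres_interrupt();			/* assumed context */

	/* Work is per package, so scheduling it once is enough. */
	pkgdev = pkg_temp_thermal_get_dev(cpu);
	if (pkgdev && !pkgdev->work_scheduled) {
		pkgdev->work_scheduled = true;
		schedule_delayed_work_on(cpu,
			&per_cpu(pkg_temp_thermal_threshold_work, cpu),
			msecs_to_jiffies(notify_delay_ms));
	}

	spin_unlock_irqrestore(&pkg_temp_lock, flags);	/* assumed context */
	return 0;
}
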
@@ -361,8 +355,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
 	u32 tj_max, eax, ebx, ecx, edx;
 	struct pkg_device *pkgdev;
 	int thres_count, err;
-	unsigned long flags;
-	u8 *temp;
 
 	cpuid(6, &eax, &ebx, &ecx, &edx);
 	thres_count = ebx & 0x07;
@@ -382,20 +374,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
 	if (!pkgdev)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&pkg_temp_lock, flags);
-	if (topology_physical_package_id(cpu) > max_phy_id)
-		max_phy_id = topology_physical_package_id(cpu);
-	temp = krealloc(pkg_work_scheduled,
-			(max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
-	if (!temp) {
-		spin_unlock_irqrestore(&pkg_temp_lock, flags);
-		kfree(pkgdev);
-		return -ENOMEM;
-	}
-	pkg_work_scheduled = temp;
-	pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
-	spin_unlock_irqrestore(&pkg_temp_lock, flags);
-
 	pkgdev->phys_proc_id = topology_physical_package_id(cpu);
 	pkgdev->cpu = cpu;
 	pkgdev->tj_max = tj_max;
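
Note that nothing replaces the removed initialization: assuming the pkgdev
allocation just above this hunk zeroes the structure (a kzalloc(), consistent
with the -ENOMEM check in context), the new work_scheduled field already
starts out false:

	/* Assumed shape of the allocation above the hunk; kzalloc() would
	 * zero the struct, so pkgdev->work_scheduled starts out false. */
	pkgdev = kzalloc(sizeof(*pkgdev), GFP_KERNEL);
	if (!pkgdev)
		return -ENOMEM;
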
@@ -554,7 +532,6 @@ err_ret:
 	for_each_online_cpu(i)
 		put_core_offline(i);
 	cpu_notifier_register_done();
-	kfree(pkg_work_scheduled);
 	return -ENODEV;
 }
 module_init(pkg_temp_thermal_init)
@@ -572,8 +549,6 @@ static void __exit pkg_temp_thermal_exit(void)
 		put_core_offline(i);
 	cpu_notifier_register_done();
 
-	kfree(pkg_work_scheduled);
-
 	debugfs_remove_recursive(debugfs);
 }
 module_exit(pkg_temp_thermal_exit)