@@ -42,6 +42,9 @@ struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu
 		return box;
 
 	raw_spin_lock(&uncore_box_lock);
+	/* Recheck in lock to handle races. */
+	if (*per_cpu_ptr(pmu->box, cpu))
+		goto out;
 	list_for_each_entry(box, &pmu->box_list, list) {
 		if (box->phys_id == topology_physical_package_id(cpu)) {
 			atomic_inc(&box->refcnt);
@@ -49,6 +52,7 @@ struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu
 			break;
 		}
 	}
+out:
 	raw_spin_unlock(&uncore_box_lock);
 
 	return *per_cpu_ptr(pmu->box, cpu);
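The hunks above apply the standard check/lock/recheck idiom: the lockless fast path returns an already-published box, and once uncore_box_lock is held the per-cpu slot is rechecked so that a caller who lost the race does not walk the list and bump the refcount a second time. The sketch below illustrates the same pattern as a standalone userspace pthreads program; all names in it (lookup_box, slots, slots_lock) are hypothetical and none of it is kernel code.

/*
 * Illustrative sketch only: the check/lock/recheck idiom used by the patch,
 * rebuilt as a self-contained pthreads program. The array of slot pointers
 * stands in for the per-cpu box pointers.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct box {
	int id;
	int refcnt;
};

#define NSLOTS 8

static struct box *slots[NSLOTS];	/* stand-in for the per-cpu box pointers */
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

static struct box *lookup_box(int slot)
{
	struct box *box;

	/* Fast path: return the already-published pointer without the lock. */
	box = slots[slot];
	if (box)
		return box;

	pthread_mutex_lock(&slots_lock);
	/* Recheck under the lock: another thread may have raced us here. */
	if (slots[slot])
		goto out;

	/* Slow path: create and publish the object exactly once. */
	box = calloc(1, sizeof(*box));
	if (box) {
		box->id = slot;
		box->refcnt = 1;
		slots[slot] = box;
	}
out:
	pthread_mutex_unlock(&slots_lock);
	return slots[slot];
}

int main(void)
{
	struct box *a = lookup_box(3);
	struct box *b = lookup_box(3);

	/*
	 * Without the recheck, two racing callers could both take the slow
	 * path and publish or reference the object twice.
	 */
	printf("same box: %s, refcnt: %d\n",
	       a == b ? "yes" : "no", a ? a->refcnt : 0);
	return 0;
}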