@@ -66,6 +66,42 @@ DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
 
+static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+{
+	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
+}
+
+static struct intel_uncore_box *
+uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
+{
+	struct intel_uncore_box *box;
+
+	box = *per_cpu_ptr(pmu->box, cpu);
+	if (box)
+		return box;
+
+	raw_spin_lock(&uncore_box_lock);
+	list_for_each_entry(box, &pmu->box_list, list) {
+		if (box->phys_id == topology_physical_package_id(cpu)) {
+			atomic_inc(&box->refcnt);
+			*per_cpu_ptr(pmu->box, cpu) = box;
+			break;
+		}
+	}
+	raw_spin_unlock(&uncore_box_lock);
+
+	return *per_cpu_ptr(pmu->box, cpu);
+}
+
+static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+{
+	/*
+	 * perf core schedules event on the basis of cpu, uncore events are
+	 * collected by one of the cpus inside a physical package.
+	 */
+	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
+}
+
 static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
 {
 	u64 count;
@@ -2845,42 +2881,6 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
 	return box;
 }
 
-static struct intel_uncore_box *
-uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
-{
-	struct intel_uncore_box *box;
-
-	box = *per_cpu_ptr(pmu->box, cpu);
-	if (box)
-		return box;
-
-	raw_spin_lock(&uncore_box_lock);
-	list_for_each_entry(box, &pmu->box_list, list) {
-		if (box->phys_id == topology_physical_package_id(cpu)) {
-			atomic_inc(&box->refcnt);
-			*per_cpu_ptr(pmu->box, cpu) = box;
-			break;
-		}
-	}
-	raw_spin_unlock(&uncore_box_lock);
-
-	return *per_cpu_ptr(pmu->box, cpu);
-}
-
-static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
-{
-	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
-}
-
-static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
-{
-	/*
-	 * perf core schedules event on the basis of cpu, uncore events are
-	 * collected by one of the cpus inside a physical package.
-	 */
-	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
-}
-
 static int
 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
 {