@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-	return pmu->boxes[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for
+	 * non-existent mappings in the topology map.
+	 */
+	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 	pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-	struct intel_uncore_pmu *pmu = type->pmus;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	if (pmu) {
-		pkg = topology_physical_package_id(cpu);
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box)
-				uncore_box_exit(box);
-		}
-	}
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-	struct intel_uncore_type **types;
-
-	for (types = uncore_msr_uncores; *types; types++)
-		__uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
 	int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
 	}
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
-	return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg, ncpus = 1;
-
-	if (first_init) {
-		/*
-		 * On init we get the number of online cpus in the package
-		 * and set refcount for all of them.
-		 */
-		ncpus = cpumask_weight(topology_core_cpumask(cpu));
-	}
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (!box)
-				continue;
-			/* The first cpu on a package activates the box */
-			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-				uncore_box_init(box);
-		}
-	}
-
-	return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
-				continue;
-			/* First cpu of a package allocates the box */
-			box = uncore_alloc_box(type, cpu_to_node(cpu));
-			if (!box)
-				return -ENOMEM;
-			box->pmu = pmu;
-			box->pkgid = pkg;
-			pmu->boxes[pkg] = box;
-		}
-	}
-	return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 				   int new_cpu)
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, pkg, target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return 0;
-
+		goto unref;
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+	/* Clear the references */
+	pkg = topology_logical_package_id(cpu);
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
 	return 0;
 }
 
+static int allocate_boxes(struct intel_uncore_type **types,
+			  unsigned int pkg, unsigned int cpu)
+{
+	struct intel_uncore_box *box, *tmp;
+	struct intel_uncore_type *type;
+	struct intel_uncore_pmu *pmu;
+	LIST_HEAD(allocated);
+	int i;
+
+	/* Try to allocate all required boxes */
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			if (pmu->boxes[pkg])
+				continue;
+			box = uncore_alloc_box(type, cpu_to_node(cpu));
+			if (!box)
+				goto cleanup;
+			box->pmu = pmu;
+			box->pkgid = pkg;
+			list_add(&box->active_list, &allocated);
+		}
+	}
+	/* Install them in the pmus */
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		box->pmu->boxes[pkg] = box;
+	}
+	return 0;
+
+cleanup:
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		kfree(box);
+	}
+	return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, ret, pkg, target;
+
+	pkg = topology_logical_package_id(cpu);
+	ret = allocate_boxes(types, pkg, cpu);
+	if (ret)
+		return ret;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (box && atomic_inc_return(&box->refcnt) == 1)
+				uncore_box_init(box);
+		}
+	}
 
 	/*
 	 * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
 	if (cret && pret)
 		return -ENODEV;
 
-	/*
-	 * Install callbacks. Core will call them for each online cpu.
-	 *
-	 * The first online cpu of each package allocates and takes
-	 * the refcounts for all other online cpus in that package.
-	 * If msrs are not enabled no allocation is required and
-	 * uncore_cpu_prepare() is not called for each online cpu.
-	 */
-	if (!cret) {
-		ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-					"perf/x86/intel/uncore:prepare",
-					uncore_cpu_prepare, NULL);
-		if (ret)
-			goto err;
-	} else {
-		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-					  "perf/x86/intel/uncore:prepare",
-					  uncore_cpu_prepare, NULL);
-	}
-	first_init = 1;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-			  "perf/x86/uncore:starting",
-			  uncore_cpu_starting, uncore_cpu_dying);
-	first_init = 0;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-			  "perf/x86/uncore:online",
-			  uncore_event_cpu_online, uncore_event_cpu_offline);
+	/* Install hotplug callbacks to setup the targets for each package */
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				"perf/x86/intel/uncore:online",
+				uncore_event_cpu_online,
+				uncore_event_cpu_offline);
+	if (ret)
+		goto err;
 	return 0;
 
 err:
-	/* Undo box->init_box() */
-	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 	return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 }