@@ -1339,6 +1339,7 @@ static __initconst const struct x86_pmu p4_pmu = {
 __init int p4_pmu_init(void)
 {
 	unsigned int low, high;
+	int i, reg;
 
 	/* If we get stripped -- indexing fails */
 	BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC);
@@ -1357,5 +1358,19 @@ __init int p4_pmu_init(void)
 
 	x86_pmu = p4_pmu;
 
+	/*
+	 * Even though the counters are configured to interrupt a particular
+	 * logical processor when an overflow happens, testing has shown that
+	 * on kdump kernels (which uses a single cpu), thread1's counter
+	 * continues to run and will report an NMI on thread0. Due to the
+	 * overflow bug, this leads to a stream of unknown NMIs.
+	 *
+	 * Solve this by zero'ing out the registers to mimic a reset.
+	 */
+	for (i = 0; i < x86_pmu.num_counters; i++) {
+		reg = x86_pmu_config_addr(i);
+		wrmsrl_safe(reg, 0ULL);
+	}
+
	return 0;
 }
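
For context, the added loop walks every generic counter, asks the perf core for that counter's configuration MSR (the P4 CCCR) via x86_pmu_config_addr(), and writes zero to it with wrmsrl_safe(), mimicking what a cold reset would leave behind so a stale counter from the crashed kernel cannot keep raising NMIs in the kdump kernel. Below is a minimal, self-contained userspace sketch of that pattern only; NUM_COUNTERS, CONFIG_MSR_BASE and mock_wrmsr() are illustrative stand-ins, not kernel API, and the constants are assumptions rather than values taken from this patch.

/*
 * Userspace mock of the reset loop above. In the kernel the real work
 * is done by x86_pmu_config_addr() and wrmsrl_safe(); here both are
 * replaced by stand-ins so the pattern can be run and inspected.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS	18	/* assumption: ARCH_P4_MAX_CCCR-sized set */
#define CONFIG_MSR_BASE	0x360	/* assumption: MSR_P4_BPU_CCCR0-like base */

/* Pretend MSR state: index -> last written value */
static uint64_t fake_msrs[NUM_COUNTERS];

/* Stand-in for wrmsrl_safe(): record the write instead of touching hardware */
static void mock_wrmsr(unsigned int msr, uint64_t val)
{
	fake_msrs[msr - CONFIG_MSR_BASE] = val;
	printf("wrmsr 0x%x <- 0x%llx\n", msr, (unsigned long long)val);
}

int main(void)
{
	int i;
	unsigned int reg;

	/* Same shape as the patch: clear every counter's config register */
	for (i = 0; i < NUM_COUNTERS; i++) {
		reg = CONFIG_MSR_BASE + i;	/* stand-in for x86_pmu_config_addr(i) */
		mock_wrmsr(reg, 0ULL);
	}
	return 0;
}

The key design point carried over from the patch is the use of the "safe" write variant: wrmsrl_safe() tolerates a faulting MSR access instead of oopsing, which is the conservative choice when poking per-counter registers during early PMU init.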