@@ -270,7 +270,7 @@ static void print_mce(struct mce *m)
 {
 	__print_mce(m);
 
-	if (m->cpuvendor != X86_VENDOR_AMD)
+	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
 		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
 
@@ -508,9 +508,9 @@ static int mce_usable_address(struct mce *m)
 
 bool mce_is_memory_error(struct mce *m)
 {
-	if (m->cpuvendor == X86_VENDOR_AMD) {
+	if (m->cpuvendor == X86_VENDOR_AMD ||
+	    m->cpuvendor == X86_VENDOR_HYGON) {
 		return amd_mce_is_memory_error(m);
-
 	} else if (m->cpuvendor == X86_VENDOR_INTEL) {
 		/*
 		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
@@ -539,6 +539,9 @@ static bool mce_is_correctable(struct mce *m)
 	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 		return false;
 
+	if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
+		return false;
+
 	if (m->status & MCI_STATUS_UC)
 		return false;
 
@@ -1705,7 +1708,7 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
  */
 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
 {
-	if (c->x86_vendor == X86_VENDOR_AMD) {
+	if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
 		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
 		mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR);
 		mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA);
@@ -1746,6 +1749,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 		mce_amd_feature_init(c);
 		break;
 		}
+
+	case X86_VENDOR_HYGON:
+		mce_hygon_feature_init(c);
+		break;
+
 	case X86_VENDOR_CENTAUR:
 		mce_centaur_feature_init(c);
 		break;
@@ -1971,12 +1979,14 @@ static void mce_disable_error_reporting(void)
 static void vendor_disable_error_reporting(void)
 {
 	/*
-	 * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide.
+	 * Don't clear on Intel or AMD or Hygon CPUs. Some of these MSRs
+	 * are socket-wide.
 	 * Disabling them for just a single offlined CPU is bad, since it will
 	 * inhibit reporting for all shared resources on the socket like the
 	 * last level cache (LLC), the integrated memory controller (iMC), etc.
 	 */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
 	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		return;
 