
Merge tag 'edac_for_4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp

Pull EDAC updates from Borislav Petkov:

 - add KNM support to sb_edac (Piotr Luc)

 - add AMD Zen support to amd64_edac (Yazen Ghannam)

 - misc small cleanups, improvements and fixes (Colin Ian King, Dave
   Hansen, Pan Bian, Thor Thayer, Wei Yongjun, Yanjiang Jin, yours
   truly)

* tag 'edac_for_4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp: (26 commits)
  EDAC, amd64: Fix improper return value
  EDAC, amd64: Improve amd64-specific printing macros
  EDAC, amd64: Autoload amd64_edac_mod on Fam17h systems
  EDAC, amd64: Define and register UMC error decode function
  EDAC, amd64: Determine EDAC capabilities on Fam17h systems
  EDAC, amd64: Determine EDAC MC capabilities on Fam17h
  EDAC, amd64: Add Fam17h debug output
  EDAC, amd64: Add Fam17h scrubber support
  EDAC, mce_amd: Don't report poison bit on Fam15h, bank 4
  EDAC, amd64: Read MC registers on AMD Fam17h
  EDAC, amd64: Reserve correct PCI devices on AMD Fam17h
  EDAC, amd64: Add AMD Fam17h family type and ops
  EDAC, amd64: Extend ecc_enabled() to Fam17h
  EDAC, amd64: Don't force-enable ECC checking on newer systems
  EDAC, amd64: Add Deferred Error type
  EDAC, amd64: Rename __log_bus_error() to be more specific
  EDAC, amd64: Change target of pci_name from F2 to F3
  EDAC, mce_amd: Rename nb_bus_decoder to dram_ecc_decoder
  EDAC: Add LRDDR4 DRAM type
  EDAC, mpc85xx: Implement remove method for the platform driver
  ...
Linus Torvalds · 9 years ago · commit daf34710a9

+ 4 - 0
drivers/edac/altera_edac.c

@@ -153,13 +153,17 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
 	if (count == 3) {
 		edac_printk(KERN_ALERT, EDAC_MC,
 			    "Inject Double bit error\n");
+		local_irq_disable();
 		regmap_write(drvdata->mc_vbase, priv->ce_ue_trgr_offset,
 			     (read_reg | priv->ue_set_mask));
+		local_irq_enable();
 	} else {
 		edac_printk(KERN_ALERT, EDAC_MC,
 			    "Inject Single bit error\n");
+		local_irq_disable();
 		regmap_write(drvdata->mc_vbase,	priv->ce_ue_trgr_offset,
 			     (read_reg | priv->ce_set_mask));
+		local_irq_enable();
 	}
 
 	ptemp[0] = 0x5A5A5A5A;

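One detail worth calling out in the hunk above: each injection write is bracketed by local_irq_disable()/local_irq_enable(), presumably so that no interrupt can land between arming the trigger register and the test-pattern writes that follow it. A minimal kernel-context sketch of that guarded-write pattern (not runnable outside a module; the function name is illustrative, and local_irq_save()/restore() is shown as the variant that also preserves the caller's interrupt state):

    #include <linux/irqflags.h>
    #include <linux/regmap.h>

    /* Sketch: make a trigger write atomic w.r.t. local interrupts. */
    static void guarded_trigger_write(struct regmap *map, u32 offset, u32 val)
    {
    	unsigned long flags;

    	local_irq_save(flags);          /* remember and disable local IRQs */
    	regmap_write(map, offset, val); /* arm the error injector */
    	local_irq_restore(flags);       /* restore previous IRQ state */
    }
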
+ 580 - 112
drivers/edac/amd64_edac.c

@@ -164,8 +164,23 @@ static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
  * other archs, we might not have access to the caches directly.
  */
 
+static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
+{
+	/*
+	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
+	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
+	 * as 0x0, scrubval 0x6 as 0x1, etc.
+	 */
+	if (scrubval >= 0x5 && scrubval <= 0x14) {
+		scrubval -= 0x5;
+		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
+		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
+	} else {
+		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
+	}
+}
 /*
- * scan the scrub rate mapping table for a close or matching bandwidth value to
+ * Scan the scrub rate mapping table for a close or matching bandwidth value to
  * issue. If requested is too big, then use last maximum value found.
  */
 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
@@ -196,7 +211,9 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
 
 	scrubval = scrubrates[i].scrubval;
 
-	if (pvt->fam == 0x15 && pvt->model == 0x60) {
+	if (pvt->fam == 0x17) {
+		__f17h_set_scrubval(pvt, scrubval);
+	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
 		f15h_select_dct(pvt, 0);
 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
 		f15h_select_dct(pvt, 1);
@@ -233,18 +250,34 @@ static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 static int get_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
-	u32 scrubval = 0;
 	int i, retval = -EINVAL;
+	u32 scrubval = 0;
 
-	if (pvt->fam == 0x15) {
+	switch (pvt->fam) {
+	case 0x15:
 		/* Erratum #505 */
 		if (pvt->model < 0x10)
 			f15h_select_dct(pvt, 0);
 
 		if (pvt->model == 0x60)
 			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
-	} else
+		break;
+
+	case 0x17:
+		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
+		if (scrubval & BIT(0)) {
+			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
+			scrubval &= 0xF;
+			scrubval += 0x5;
+		} else {
+			scrubval = 0;
+		}
+		break;
+
+	default:
 		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
+		break;
+	}
 
 	scrubval = scrubval & 0x001F;
 
@@ -682,15 +715,33 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
  */
 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
 {
-	u8 bit;
 	unsigned long edac_cap = EDAC_FLAG_NONE;
+	u8 bit;
 
-	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
-		? 19
-		: 17;
+	if (pvt->umc) {
+		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
 
-	if (pvt->dclr0 & BIT(bit))
-		edac_cap = EDAC_FLAG_SECDED;
+		for (i = 0; i < NUM_UMCS; i++) {
+			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
+				continue;
+
+			umc_en_mask |= BIT(i);
+
+			/* UMC Configuration bit 12 (DimmEccEn) */
+			if (pvt->umc[i].umc_cfg & BIT(12))
+				dimm_ecc_en_mask |= BIT(i);
+		}
+
+		if (umc_en_mask == dimm_ecc_en_mask)
+			edac_cap = EDAC_FLAG_SECDED;
+	} else {
+		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
+			? 19
+			: 17;
+
+		if (pvt->dclr0 & BIT(bit))
+			edac_cap = EDAC_FLAG_SECDED;
+	}
 
 	return edac_cap;
 }
@@ -729,8 +780,75 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
 		 (dclr & BIT(15)) ?  "yes" : "no");
 }
 
+static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
+{
+	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
+	int dimm, size0, size1;
+
+	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
+
+	for (dimm = 0; dimm < 4; dimm++) {
+		size0 = 0;
+
+		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
+			size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm);
+
+		size1 = 0;
+		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
+			size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm);
+
+		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
+				dimm * 2,     size0,
+				dimm * 2 + 1, size1);
+	}
+}
+
+static void __dump_misc_regs_df(struct amd64_pvt *pvt)
+{
+	struct amd64_umc *umc;
+	u32 i, tmp, umc_base;
+
+	for (i = 0; i < NUM_UMCS; i++) {
+		umc_base = get_umc_base(i);
+		umc = &pvt->umc[i];
+
+		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
+		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
+		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
+		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
+
+		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
+		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
+
+		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
+		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
+		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
+
+		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
+				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
+				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
+		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
+				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
+		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
+				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
+		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
+				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
+
+		if (pvt->dram_type == MEM_LRDDR4) {
+			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
+			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
+					i, 1 << ((tmp >> 4) & 0x3));
+		}
+
+		debug_display_dimm_sizes_df(pvt, i);
+	}
+
+	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
+		 pvt->dhar, dhar_base(pvt));
+}
+
 /* Display and decode various NB registers for debug purposes. */
-static void dump_misc_regs(struct amd64_pvt *pvt)
+static void __dump_misc_regs(struct amd64_pvt *pvt)
 {
 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
@@ -750,8 +868,6 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
 				   : f10_dhar_offset(pvt));
 
-	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
-
 	debug_display_dimm_sizes(pvt, 0);
 
 	/* everything below this point is Fam10h and above */
@@ -760,13 +876,25 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
 
 	debug_display_dimm_sizes(pvt, 1);
 
-	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
-
 	/* Only if NOT ganged does dclr1 have valid info */
 	if (!dct_ganging_enabled(pvt))
 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
 }
 
+/* Display and decode various NB registers for debug purposes. */
+static void dump_misc_regs(struct amd64_pvt *pvt)
+{
+	if (pvt->umc)
+		__dump_misc_regs_df(pvt);
+	else
+		__dump_misc_regs(pvt);
+
+	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+
+	amd64_info("using %s syndromes.\n",
+			((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
+}
+
 /*
  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
  */
@@ -789,46 +917,78 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
  */
 static void read_dct_base_mask(struct amd64_pvt *pvt)
 {
-	int cs;
+	int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
 
 	prep_chip_selects(pvt);
 
+	if (pvt->umc) {
+		base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
+		base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
+		mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
+		mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
+	} else {
+		base_reg0 = DCSB0;
+		base_reg1 = DCSB1;
+		mask_reg0 = DCSM0;
+		mask_reg1 = DCSM1;
+	}
+
 	for_each_chip_select(cs, 0, pvt) {
-		int reg0   = DCSB0 + (cs * 4);
-		int reg1   = DCSB1 + (cs * 4);
+		int reg0   = base_reg0 + (cs * 4);
+		int reg1   = base_reg1 + (cs * 4);
 		u32 *base0 = &pvt->csels[0].csbases[cs];
 		u32 *base1 = &pvt->csels[1].csbases[cs];
 
-		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
-			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
-				 cs, *base0, reg0);
+		if (pvt->umc) {
+			if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
+				edac_dbg(0, "  DCSB0[%d]=0x%08x reg: 0x%x\n",
+					 cs, *base0, reg0);
 
-		if (pvt->fam == 0xf)
-			continue;
+			if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
+				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: 0x%x\n",
+					 cs, *base1, reg1);
+		} else {
+			if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
+				edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
+					 cs, *base0, reg0);
+
+			if (pvt->fam == 0xf)
+				continue;
 
-		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
-			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
-				 cs, *base1, (pvt->fam == 0x10) ? reg1
+			if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
+				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
+					 cs, *base1, (pvt->fam == 0x10) ? reg1
 								: reg0);
+		}
 	}
 
 	for_each_chip_select_mask(cs, 0, pvt) {
-		int reg0   = DCSM0 + (cs * 4);
-		int reg1   = DCSM1 + (cs * 4);
+		int reg0   = mask_reg0 + (cs * 4);
+		int reg1   = mask_reg1 + (cs * 4);
 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
 
-		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
-			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
-				 cs, *mask0, reg0);
+		if (pvt->umc) {
+			if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
+				edac_dbg(0, "    DCSM0[%d]=0x%08x reg: 0x%x\n",
+					 cs, *mask0, reg0);
 
-		if (pvt->fam == 0xf)
-			continue;
+			if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
+				edac_dbg(0, "    DCSM1[%d]=0x%08x reg: 0x%x\n",
+					 cs, *mask1, reg1);
+		} else {
+			if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
+				edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
+					 cs, *mask0, reg0);
+
+			if (pvt->fam == 0xf)
+				continue;
 
-		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
-			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
-				 cs, *mask1, (pvt->fam == 0x10) ? reg1
+			if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
+				edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
+					 cs, *mask1, (pvt->fam == 0x10) ? reg1
 								: reg0);
+		}
 	}
 }
 
@@ -881,6 +1041,15 @@ static void determine_memory_type(struct amd64_pvt *pvt)
 	case 0x16:
 		goto ddr3;
 
+	case 0x17:
+		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
+			pvt->dram_type = MEM_LRDDR4;
+		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
+			pvt->dram_type = MEM_RDDR4;
+		else
+			pvt->dram_type = MEM_DDR4;
+		return;
+
 	default:
 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
 		pvt->dram_type = MEM_EMPTY;
@@ -1210,6 +1379,19 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
 	return channels;
 }
 
+static int f17_early_channel_count(struct amd64_pvt *pvt)
+{
+	int i, channels = 0;
+
+	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
+	for (i = 0; i < NUM_UMCS; i++)
+		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
+
+	amd64_info("MCT channel count: %d\n", channels);
+
+	return channels;
+}
+
 static int ddr3_cs_size(unsigned i, bool dct_width)
 {
 	unsigned shift = 0;
@@ -1337,6 +1519,23 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
 		return ddr3_cs_size(cs_mode, false);
 }
 
+static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+				    unsigned int cs_mode, int csrow_nr)
+{
+	u32 base_addr = pvt->csels[umc].csbases[csrow_nr];
+
+	/*  Each mask is used for every two base addresses. */
+	u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];
+
+	/*  Register [31:1] = Address [39:9]. Size is in kBs here. */
+	u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;
+
+	edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);
+
+	/* Return size in MBs. */
+	return size >> 10;
+}
+
 static void read_dram_ctl_register(struct amd64_pvt *pvt)
 {
 
@@ -1897,8 +2096,9 @@ static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
 
 		size0 = 0;
 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
-			/* For f15m60h, need multiplier for LRDIMM cs_size
-			 * calculation. We pass 'dimm' value to the dbam_to_cs
+			/*
+			 * For F15m60h, we need multiplier for LRDIMM cs_size
+			 * calculation. We pass dimm value to the dbam_to_cs
 			 * mapper so we can find the multiplier from the
 			 * corresponding DCSM.
 			 */
@@ -1989,6 +2189,15 @@ static struct amd64_family_type family_types[] = {
 			.dbam_to_cs		= f16_dbam_to_chip_select,
 		}
 	},
+	[F17_CPUS] = {
+		.ctl_name = "F17h",
+		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
+		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
+		.ops = {
+			.early_channel_count	= f17_early_channel_count,
+			.dbam_to_cs		= f17_base_addr_to_cs_size,
+		}
+	},
 };
 
 /*
@@ -2155,7 +2364,7 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
 }
 
-static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
+static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
 			    u8 ecc_type)
 {
 	enum hw_event_mc_err_type err_type;
@@ -2165,6 +2374,8 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
 		err_type = HW_EVENT_ERR_CORRECTED;
 	else if (ecc_type == 1)
 		err_type = HW_EVENT_ERR_UNCORRECTED;
+	else if (ecc_type == 3)
+		err_type = HW_EVENT_ERR_DEFERRED;
 	else {
 		WARN(1, "Something is rotten in the state of Denmark.\n");
 		return;
@@ -2181,7 +2392,13 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
 		string = "Failed to map error addr to a csrow";
 		break;
 	case ERR_CHANNEL:
-		string = "unknown syndrome - possible error reporting race";
+		string = "Unknown syndrome - possible error reporting race";
+		break;
+	case ERR_SYND:
+		string = "MCA_SYND not valid - unknown syndrome and csrow";
+		break;
+	case ERR_NORM_ADDR:
+		string = "Cannot decode normalized address";
 		break;
 	default:
 		string = "WTF error";
@@ -2227,36 +2444,127 @@ static inline void decode_bus_error(int node_id, struct mce *m)
 
 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
 
-	__log_bus_error(mci, &err, ecc_type);
+	__log_ecc_error(mci, &err, ecc_type);
+}
+
+/*
+ * To find the UMC channel represented by this bank we need to match on its
+ * instance_id. The instance_id of a bank is held in the lower 32 bits of its
+ * IPID.
+ */
+static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
+{
+	u32 umc_instance_id[] = {0x50f00, 0x150f00};
+	u32 instance_id = m->ipid & GENMASK(31, 0);
+	int i, channel = -1;
+
+	for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
+		if (umc_instance_id[i] == instance_id)
+			channel = i;
+
+	return channel;
+}
+
+static void decode_umc_error(int node_id, struct mce *m)
+{
+	u8 ecc_type = (m->status >> 45) & 0x3;
+	struct mem_ctl_info *mci;
+	struct amd64_pvt *pvt;
+	struct err_info err;
+	u64 sys_addr;
+
+	mci = edac_mc_find(node_id);
+	if (!mci)
+		return;
+
+	pvt = mci->pvt_info;
+
+	memset(&err, 0, sizeof(err));
+
+	if (m->status & MCI_STATUS_DEFERRED)
+		ecc_type = 3;
+
+	err.channel = find_umc_channel(pvt, m);
+	if (err.channel < 0) {
+		err.err_code = ERR_CHANNEL;
+		goto log_error;
+	}
+
+	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+		err.err_code = ERR_NORM_ADDR;
+		goto log_error;
+	}
+
+	error_address_to_page_and_offset(sys_addr, &err);
+
+	if (!(m->status & MCI_STATUS_SYNDV)) {
+		err.err_code = ERR_SYND;
+		goto log_error;
+	}
+
+	if (ecc_type == 2) {
+		u8 length = (m->synd >> 18) & 0x3f;
+
+		if (length)
+			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
+		else
+			err.err_code = ERR_CHANNEL;
+	}
+
+	err.csrow = m->synd & 0x7;
+
+log_error:
+	__log_ecc_error(mci, &err, ecc_type);
 }
 
 /*
  * Use pvt->F3 which contains the F3 CPU PCI device to get the related
  * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
+ * Reserve F0 and F6 on systems with a UMC.
  */
-static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
-{
+static int
+reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
+{
+	if (pvt->umc) {
+		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
+		if (!pvt->F0) {
+			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
+			return -ENODEV;
+		}
+
+		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
+		if (!pvt->F6) {
+			pci_dev_put(pvt->F0);
+			pvt->F0 = NULL;
+
+			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
+			return -ENODEV;
+		}
+
+		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
+		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
+		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
+
+		return 0;
+	}
+
 	/* Reserve the ADDRESS MAP Device */
-	pvt->F1 = pci_get_related_function(pvt->F3->vendor, f1_id, pvt->F3);
+	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
 	if (!pvt->F1) {
-		amd64_err("error address map device not found: "
-			  "vendor %x device 0x%x (broken BIOS?)\n",
-			  PCI_VENDOR_ID_AMD, f1_id);
+		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
 		return -ENODEV;
 	}
 
 	/* Reserve the DCT Device */
-	pvt->F2 = pci_get_related_function(pvt->F3->vendor, f2_id, pvt->F3);
+	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
 	if (!pvt->F2) {
 		pci_dev_put(pvt->F1);
 		pvt->F1 = NULL;
 
-		amd64_err("error F2 device not found: "
-			  "vendor %x device 0x%x (broken BIOS?)\n",
-			  PCI_VENDOR_ID_AMD, f2_id);
-
+		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
 		return -ENODEV;
 	}
+
 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
@@ -2266,8 +2574,69 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
 
 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
 {
-	pci_dev_put(pvt->F1);
-	pci_dev_put(pvt->F2);
+	if (pvt->umc) {
+		pci_dev_put(pvt->F0);
+		pci_dev_put(pvt->F6);
+	} else {
+		pci_dev_put(pvt->F1);
+		pci_dev_put(pvt->F2);
+	}
+}
+
+static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
+{
+	pvt->ecc_sym_sz = 4;
+
+	if (pvt->umc) {
+		u8 i;
+
+		for (i = 0; i < NUM_UMCS; i++) {
+			/* Check enabled channels only: */
+			if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
+			    (pvt->umc[i].ecc_ctrl & BIT(7))) {
+				pvt->ecc_sym_sz = 8;
+				break;
+			}
+		}
+
+		return;
+	}
+
+	if (pvt->fam >= 0x10) {
+		u32 tmp;
+
+		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+		/* F16h has only DCT0, so no need to read dbam1. */
+		if (pvt->fam != 0x16)
+			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
+
+		/* F10h, revD and later can do x8 ECC too. */
+		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
+			pvt->ecc_sym_sz = 8;
+	}
+}
+
+/*
+ * Retrieve the hardware registers of the memory controller.
+ */
+static void __read_mc_regs_df(struct amd64_pvt *pvt)
+{
+	u8 nid = pvt->mc_node_id;
+	struct amd64_umc *umc;
+	u32 i, umc_base;
+
+	/* Read registers from each UMC */
+	for (i = 0; i < NUM_UMCS; i++) {
+
+		umc_base = get_umc_base(i);
+		umc = &pvt->umc[i];
+
+		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
+		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
+		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
+		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
+		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
+	}
 }
 
 /*
@@ -2276,24 +2645,31 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
  */
 static void read_mc_regs(struct amd64_pvt *pvt)
 {
-	unsigned range;
+	unsigned int range;
 	u64 msr_val;
-	u32 tmp;
 
 	/*
 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
-	 * those are Read-As-Zero
+	 * those are Read-As-Zero.
 	 */
 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
 
-	/* check first whether TOP_MEM2 is enabled */
+	/* Check first whether TOP_MEM2 is enabled: */
 	rdmsrl(MSR_K8_SYSCFG, msr_val);
-	if (msr_val & (1U << 21)) {
+	if (msr_val & BIT(21)) {
 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
-	} else
+	} else {
 		edac_dbg(0, "  TOP_MEM2 disabled\n");
+	}
+
+	if (pvt->umc) {
+		__read_mc_regs_df(pvt);
+		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
+
+		goto skip;
+	}
 
 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
 
@@ -2322,8 +2698,6 @@ static void read_mc_regs(struct amd64_pvt *pvt)
 			 dram_dst_node(pvt, range));
 	}
 
-	read_dct_base_mask(pvt);
-
 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
 
@@ -2337,20 +2711,14 @@ static void read_mc_regs(struct amd64_pvt *pvt)
 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
 	}
 
-	pvt->ecc_sym_sz = 4;
+skip:
+	read_dct_base_mask(pvt);
+
 	determine_memory_type(pvt);
 	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
 
-	if (pvt->fam >= 0x10) {
-		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
-		/* F16h has only DCT0, so no need to read dbam1 */
-		if (pvt->fam != 0x16)
-			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
+	determine_ecc_sym_sz(pvt);
 
-		/* F10h, revD and later can do x8 ECC too */
-		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
-			pvt->ecc_sym_sz = 8;
-	}
 	dump_misc_regs(pvt);
 }
 
@@ -2420,20 +2788,22 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
 static int init_csrows(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
+	enum edac_type edac_mode = EDAC_NONE;
 	struct csrow_info *csrow;
 	struct dimm_info *dimm;
-	enum edac_type edac_mode;
 	int i, j, empty = 1;
 	int nr_pages = 0;
 	u32 val;
 
-	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
+	if (!pvt->umc) {
+		amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
 
-	pvt->nbcfg = val;
+		pvt->nbcfg = val;
 
-	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
-		 pvt->mc_node_id, val,
-		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
+		edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+			 pvt->mc_node_id, val,
+			 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
+	}
 
 	/*
 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
@@ -2469,14 +2839,18 @@ static int init_csrows(struct mem_ctl_info *mci)
 
 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
 
-		/*
-		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
-		 */
-		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
-			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
-				    EDAC_S4ECD4ED : EDAC_SECDED;
-		else
-			edac_mode = EDAC_NONE;
+		/* Determine DIMM ECC mode: */
+		if (pvt->umc) {
+			if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
+				edac_mode = EDAC_S4ECD4ED;
+			else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
+				edac_mode = EDAC_SECDED;
+
+		} else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
+			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
+					? EDAC_S4ECD4ED
+					: EDAC_SECDED;
+		}
 
 		for (j = 0; j < pvt->channel_count; j++) {
 			dimm = csrow->channels[j]->dimm;
@@ -2539,7 +2913,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
 
 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
 		amd64_warn("%s: error allocating mask\n", __func__);
-		return false;
+		return -ENOMEM;
 	}
 
 	get_cpus_on_this_dct_cpumask(cmask, nid);
@@ -2627,7 +3001,6 @@ static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
 {
 	u32 value, mask = 0x3;		/* UECC/CECC enable */
 
-
 	if (!s->nbctl_valid)
 		return;
 
@@ -2663,20 +3036,50 @@ static const char *ecc_msg =
 
 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 {
-	u32 value;
-	u8 ecc_en = 0;
 	bool nb_mce_en = false;
+	u8 ecc_en = 0, i;
+	u32 value;
 
-	amd64_read_pci_cfg(F3, NBCFG, &value);
+	if (boot_cpu_data.x86 >= 0x17) {
+		u8 umc_en_mask = 0, ecc_en_mask = 0;
 
-	ecc_en = !!(value & NBCFG_ECC_ENABLE);
-	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
+		for (i = 0; i < NUM_UMCS; i++) {
+			u32 base = get_umc_base(i);
+
+			/* Only check enabled UMCs. */
+			if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
+				continue;
+
+			if (!(value & UMC_SDP_INIT))
+				continue;
+
+			umc_en_mask |= BIT(i);
 
-	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
-	if (!nb_mce_en)
-		amd64_notice("NB MCE bank disabled, set MSR "
-			     "0x%08x[4] on node %d to enable.\n",
-			     MSR_IA32_MCG_CTL, nid);
+			if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
+				continue;
+
+			if (value & UMC_ECC_ENABLED)
+				ecc_en_mask |= BIT(i);
+		}
+
+		/* Check whether at least one UMC is enabled: */
+		if (umc_en_mask)
+			ecc_en = umc_en_mask == ecc_en_mask;
+
+		/* Assume UMC MCA banks are enabled. */
+		nb_mce_en = true;
+	} else {
+		amd64_read_pci_cfg(F3, NBCFG, &value);
+
+		ecc_en = !!(value & NBCFG_ECC_ENABLE);
+
+		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
+		if (!nb_mce_en)
+			amd64_notice("NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
+				     MSR_IA32_MCG_CTL, nid);
+	}
+
+	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
 
 	if (!ecc_en || !nb_mce_en) {
 		amd64_notice("%s", ecc_msg);
@@ -2685,6 +3088,27 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 	return true;
 }
 
+static inline void
+f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
+{
+	u8 i, ecc_en = 1, cpk_en = 1;
+
+	for (i = 0; i < NUM_UMCS; i++) {
+		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
+			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
+			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
+		}
+	}
+
+	/* Set chipkill only if ECC is enabled: */
+	if (ecc_en) {
+		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+
+		if (cpk_en)
+			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+	}
+}
+
 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
 				 struct amd64_family_type *fam)
 {
@@ -2693,17 +3117,21 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
 
-	if (pvt->nbcap & NBCAP_SECDED)
-		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+	if (pvt->umc) {
+		f17h_determine_edac_ctl_cap(mci, pvt);
+	} else {
+		if (pvt->nbcap & NBCAP_SECDED)
+			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
 
-	if (pvt->nbcap & NBCAP_CHIPKILL)
-		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+		if (pvt->nbcap & NBCAP_CHIPKILL)
+			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+	}
 
 	mci->edac_cap		= determine_edac_cap(pvt);
 	mci->mod_name		= EDAC_MOD_STR;
 	mci->mod_ver		= EDAC_AMD64_VERSION;
 	mci->ctl_name		= fam->ctl_name;
-	mci->dev_name		= pci_name(pvt->F2);
+	mci->dev_name		= pci_name(pvt->F3);
 	mci->ctl_page_to_phys	= NULL;
 
 	/* memory scrubber interface */
@@ -2759,6 +3187,11 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
 		pvt->ops	= &family_types[F16_CPUS].ops;
 		break;
 
+	case 0x17:
+		fam_type	= &family_types[F17_CPUS];
+		pvt->ops	= &family_types[F17_CPUS].ops;
+		break;
+
 	default:
 		amd64_err("Unsupported family!\n");
 		return NULL;
@@ -2789,6 +3222,7 @@ static int init_one_instance(unsigned int nid)
 	struct mem_ctl_info *mci = NULL;
 	struct edac_mc_layer layers[2];
 	struct amd64_pvt *pvt = NULL;
+	u16 pci_id1, pci_id2;
 	int err = 0, ret;
 
 	ret = -ENOMEM;
@@ -2804,10 +3238,23 @@ static int init_one_instance(unsigned int nid)
 	if (!fam_type)
 		goto err_free;
 
-	ret = -ENODEV;
-	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f2_id);
+	if (pvt->fam >= 0x17) {
+		pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
+		if (!pvt->umc) {
+			ret = -ENOMEM;
+			goto err_free;
+		}
+
+		pci_id1 = fam_type->f0_id;
+		pci_id2 = fam_type->f6_id;
+	} else {
+		pci_id1 = fam_type->f1_id;
+		pci_id2 = fam_type->f2_id;
+	}
+
+	err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
 	if (err)
-		goto err_free;
+		goto err_post_init;
 
 	read_mc_regs(pvt);
 
@@ -2857,7 +3304,10 @@ static int init_one_instance(unsigned int nid)
 	if (report_gart_errors)
 		amd_report_gart_errors(true);
 
-	amd_register_ecc_decoder(decode_bus_error);
+	if (pvt->umc)
+		amd_register_ecc_decoder(decode_umc_error);
+	else
+		amd_register_ecc_decoder(decode_bus_error);
 
 	return 0;
 
@@ -2867,6 +3317,10 @@ static int init_one_instance(unsigned int nid)
 err_siblings:
 	free_mc_sibling_devs(pvt);
 
+err_post_init:
+	if (pvt->fam >= 0x17)
+		kfree(pvt->umc);
+
 err_free:
 	kfree(pvt);
 
@@ -2893,7 +3347,11 @@ static int probe_one_instance(unsigned int nid)
 		if (!ecc_enable_override)
 			goto err_enable;
 
-		amd64_warn("Forcing ECC on!\n");
+		if (boot_cpu_data.x86 >= 0x17) {
+			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
+			goto err_enable;
+		} else
+			amd64_warn("Forcing ECC on!\n");
 
 		if (!enable_ecc_error_reporting(s, nid, F3))
 			goto err_enable;
@@ -2902,7 +3360,9 @@ static int probe_one_instance(unsigned int nid)
 	ret = init_one_instance(nid);
 	if (ret < 0) {
 		amd64_err("Error probing instance: %d\n", nid);
-		restore_ecc_error_reporting(s, nid, F3);
+
+		if (boot_cpu_data.x86 < 0x17)
+			restore_ecc_error_reporting(s, nid, F3);
 	}
 
 	return ret;
@@ -2938,7 +3398,11 @@ static void remove_one_instance(unsigned int nid)
 
 	/* unregister from EDAC MCE */
 	amd_report_gart_errors(false);
-	amd_unregister_ecc_decoder(decode_bus_error);
+
+	if (pvt->umc)
+		amd_unregister_ecc_decoder(decode_umc_error);
+	else
+		amd_unregister_ecc_decoder(decode_bus_error);
 
 	kfree(ecc_stngs[nid]);
 	ecc_stngs[nid] = NULL;
@@ -2963,7 +3427,10 @@ static void setup_pci_device(void)
 		return;
 
 	pvt = mci->pvt_info;
-	pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
+	if (pvt->umc)
+		pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
+	else
+		pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
 	if (!pci_ctl) {
 		pr_warn("%s(): Unable to create PCI control\n", __func__);
 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
@@ -2975,6 +3442,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
 	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
 	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
 	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
+	{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);

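Two of the Fam17h additions above are pure bit arithmetic and easy to sanity-check in isolation: the scrub-value encoding in __f17h_set_scrubval()/get_scrub_rate() (values 0x5-0x14 are stored shifted down by 0x5) and the chip-select size computation in f17_base_addr_to_cs_size() (register bits [31:1] hold address bits [39:9], so the intermediate value is in kB and a final shift yields MB). A small user-space sketch with made-up register values — the 0x3FFFFFE mask below is hypothetical, chosen to describe a 16 GB chip select:

    #include <stdio.h>
    #include <stdint.h>

    /* Scrub values 0x5..0x14 are written to the register shifted down by
     * 0x5 (see __f17h_set_scrubval above); reading reverses the shift. */
    static uint32_t f17h_encode_scrubval(uint32_t scrubval)
    {
    	return scrubval - 0x5;          /* caller guarantees 0x5..0x14 */
    }

    static uint32_t f17h_decode_scrubval(uint32_t reg)
    {
    	return (reg & 0xF) + 0x5;
    }

    /* Register [31:1] = Address [39:9]: the intermediate result is in kB,
     * the final >> 10 converts to MB (see f17_base_addr_to_cs_size). */
    static uint32_t f17h_cs_size_mb(uint32_t base_addr, uint32_t addr_mask)
    {
    	uint32_t size_kb = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;

    	return size_kb >> 10;
    }

    int main(void)
    {
    	for (uint32_t v = 0x5; v <= 0x14; v++)
    		if (f17h_decode_scrubval(f17h_encode_scrubval(v)) != v)
    			printf("scrubval round-trip failed: 0x%x\n", v);

    	/* hypothetical base/mask pair: prints "cs size: 16384 MB" */
    	printf("cs size: %u MB\n", f17h_cs_size_mb(0x0, 0x3FFFFFE));
    	return 0;
    }
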
+ 52 - 4
drivers/edac/amd64_edac.h

@@ -30,10 +30,10 @@
 	edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
 
 #define amd64_warn(fmt, arg...) \
-	edac_printk(KERN_WARNING, "amd64", fmt, ##arg)
+	edac_printk(KERN_WARNING, "amd64", "Warning: " fmt, ##arg)
 
 #define amd64_err(fmt, arg...) \
-	edac_printk(KERN_ERR, "amd64", fmt, ##arg)
+	edac_printk(KERN_ERR, "amd64", "Error: " fmt, ##arg)
 
 #define amd64_mc_warn(mci, fmt, arg...) \
 	edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)
@@ -118,6 +118,8 @@
 #define PCI_DEVICE_ID_AMD_16H_NB_F2	0x1532
 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
+#define PCI_DEVICE_ID_AMD_17H_DF_F0	0x1460
+#define PCI_DEVICE_ID_AMD_17H_DF_F6	0x1466
 
 /*
  * Function 1 - Address Map
@@ -202,6 +204,8 @@
 #define DCT_SEL_HI			0x114
 
 #define F15H_M60H_SCRCTRL		0x1C8
+#define F17H_SCR_BASE_ADDR		0x48
+#define F17H_SCR_LIMIT_ADDR		0x4C
 
 /*
  * Function 3 - Misc Control
@@ -248,6 +252,31 @@
 /* MSRs */
 #define MSR_MCGCTL_NBE			BIT(4)
 
+/* F17h */
+
+/* F0: */
+#define DF_DHAR				0x104
+
+/* UMC CH register offsets */
+#define UMCCH_BASE_ADDR			0x0
+#define UMCCH_ADDR_MASK			0x20
+#define UMCCH_ADDR_CFG			0x30
+#define UMCCH_DIMM_CFG			0x80
+#define UMCCH_UMC_CFG			0x100
+#define UMCCH_SDP_CTRL			0x104
+#define UMCCH_ECC_CTRL			0x14C
+#define UMCCH_ECC_BAD_SYMBOL		0xD90
+#define UMCCH_UMC_CAP			0xDF0
+#define UMCCH_UMC_CAP_HI		0xDF4
+
+/* UMC CH bitfields */
+#define UMC_ECC_CHIPKILL_CAP		BIT(31)
+#define UMC_ECC_ENABLED			BIT(30)
+
+#define UMC_SDP_INIT			BIT(31)
+
+#define NUM_UMCS			2
+
 enum amd_families {
 	K8_CPUS = 0,
 	F10_CPUS,
@@ -256,6 +285,7 @@ enum amd_families {
 	F15_M60H_CPUS,
 	F16_CPUS,
 	F16_M30H_CPUS,
+	F17_CPUS,
 	NUM_FAMILIES,
 };
 
@@ -288,11 +318,19 @@ struct chip_select {
 	u8 m_cnt;
 };
 
+struct amd64_umc {
+	u32 dimm_cfg;		/* DIMM Configuration reg */
+	u32 umc_cfg;		/* Configuration reg */
+	u32 sdp_ctrl;		/* SDP Control reg */
+	u32 ecc_ctrl;		/* DRAM ECC Control reg */
+	u32 umc_cap_hi;		/* Capabilities High reg */
+};
+
 struct amd64_pvt {
 	struct low_ops *ops;
 
 	/* pci_device handles which we utilize */
-	struct pci_dev *F1, *F2, *F3;
+	struct pci_dev *F0, *F1, *F2, *F3, *F6;
 
 	u16 mc_node_id;		/* MC index of this MC node */
 	u8 fam;			/* CPU family */
@@ -335,6 +373,8 @@ struct amd64_pvt {
 
 	/* cache the dram_type */
 	enum mem_type dram_type;
+
+	struct amd64_umc *umc;	/* UMC registers */
 };
 
 enum err_codes {
@@ -342,6 +382,8 @@ enum err_codes {
 	ERR_NODE	= -1,
 	ERR_CSROW	= -2,
 	ERR_CHANNEL	= -3,
+	ERR_SYND	= -4,
+	ERR_NORM_ADDR	= -5,
 };
 
 struct err_info {
@@ -354,6 +396,12 @@ struct err_info {
 	u32 offset;
 };
 
+static inline u32 get_umc_base(u8 channel)
+{
+	/* ch0: 0x50000, ch1: 0x150000 */
+	return 0x50000 + (!!channel << 20);
+}
+
 static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
 {
 	u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
@@ -422,7 +470,7 @@ struct low_ops {
 
 struct amd64_family_type {
 	const char *ctl_name;
-	u16 f1_id, f2_id;
+	u16 f0_id, f1_id, f2_id, f6_id;
 	struct low_ops ops;
 };
 

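The header makes the UMC register layout explicit: the SMN address of a per-channel register is get_umc_base(channel) plus one of the UMCCH_* offsets defined above. A tiny user-space sketch of that composition (offsets copied from the header; it prints 0x50104 for channel 0 and 0x150104 for channel 1):

    #include <stdio.h>
    #include <stdint.h>

    #define UMCCH_SDP_CTRL	0x104
    #define NUM_UMCS	2

    static uint32_t get_umc_base(uint8_t channel)
    {
    	/* ch0: 0x50000, ch1: 0x150000 */
    	return 0x50000 + (!!channel << 20);
    }

    int main(void)
    {
    	for (int ch = 0; ch < NUM_UMCS; ch++)
    		printf("UMC%d SDP_CTRL @ SMN 0x%x\n",
    		       ch, get_umc_base(ch) + UMCCH_SDP_CTRL);
    	return 0;
    }
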
+ 31 - 18
drivers/edac/edac_mc.c

@@ -482,15 +482,8 @@ void edac_mc_free(struct mem_ctl_info *mci)
 }
 EXPORT_SYMBOL_GPL(edac_mc_free);
 
-
-/**
- * find_mci_by_dev
- *
- *	scan list of controllers looking for the one that manages
- *	the 'dev' device
- * @dev: pointer to a struct device related with the MCI
- */
-struct mem_ctl_info *find_mci_by_dev(struct device *dev)
+/* Caller must hold mem_ctls_mutex */
+static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
 {
 	struct mem_ctl_info *mci;
 	struct list_head *item;
@@ -506,6 +499,24 @@ struct mem_ctl_info *find_mci_by_dev(struct device *dev)
 
 	return NULL;
 }
+
+/**
+ * find_mci_by_dev
+ *
+ *	scan list of controllers looking for the one that manages
+ *	the 'dev' device
+ * @dev: pointer to a struct device related with the MCI
+ */
+struct mem_ctl_info *find_mci_by_dev(struct device *dev)
+{
+	struct mem_ctl_info *ret;
+
+	mutex_lock(&mem_ctls_mutex);
+	ret = __find_mci_by_dev(dev);
+	mutex_unlock(&mem_ctls_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(find_mci_by_dev);
 
 /*
@@ -588,7 +599,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
 
 	insert_before = &mc_devices;
 
-	p = find_mci_by_dev(mci->pdev);
+	p = __find_mci_by_dev(mci->pdev);
 	if (unlikely(p != NULL))
 		goto fail0;
 
@@ -640,26 +651,28 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci)
  *
  * If found, return a pointer to the structure.
  * Else return NULL.
- *
- * Caller must hold mem_ctls_mutex.
  */
 struct mem_ctl_info *edac_mc_find(int idx)
 {
+	struct mem_ctl_info *mci = NULL;
 	struct list_head *item;
-	struct mem_ctl_info *mci;
+
+	mutex_lock(&mem_ctls_mutex);
 
 	list_for_each(item, &mc_devices) {
 		mci = list_entry(item, struct mem_ctl_info, link);
 
 		if (mci->mc_idx >= idx) {
-			if (mci->mc_idx == idx)
-				return mci;
-
+			if (mci->mc_idx == idx) {
+				goto unlock;
+			}
 			break;
 		}
 	}
 
-	return NULL;
+unlock:
+	mutex_unlock(&mem_ctls_mutex);
+	return mci;
 }
 EXPORT_SYMBOL(edac_mc_find);
 
@@ -779,7 +792,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
 	mutex_lock(&mem_ctls_mutex);
 
 	/* find the requested mci struct in the global list */
-	mci = find_mci_by_dev(dev);
+	mci = __find_mci_by_dev(dev);
 	if (mci == NULL) {
 		mutex_unlock(&mem_ctls_mutex);
 		return NULL;

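The edac_mc.c change is a classic lock-wrapper refactor: the list walk moves into __find_mci_by_dev(), which documents that the caller must hold mem_ctls_mutex, while the exported find_mci_by_dev() now takes the mutex itself; internal callers that already hold it (add_mc_to_global_list(), edac_mc_del_mc()) switch to the double-underscore helper, and edac_mc_find() gets the same treatment. A hedged, generic user-space re-creation of the pattern (names and data are illustrative, not kernel APIs):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static int registry[4] = { 10, 20, 30, 40 };

    /* Caller must hold list_lock (mirrors __find_mci_by_dev). */
    static int __find_locked(int key)
    {
    	for (int i = 0; i < 4; i++)
    		if (registry[i] == key)
    			return i;
    	return -1;
    }

    /* Public wrapper (mirrors find_mci_by_dev): takes the lock itself,
     * so external callers can no longer race against list mutation. */
    static int find(int key)
    {
    	pthread_mutex_lock(&list_lock);
    	int idx = __find_locked(key);
    	pthread_mutex_unlock(&list_lock);

    	return idx;
    }

    int main(void)
    {
    	printf("found at index %d\n", find(30));	/* prints 2 */
    	return 0;
    }
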
+ 21 - 11
drivers/edac/mce_amd.c

@@ -8,7 +8,7 @@ static struct amd_decoder_ops *fam_ops;
 static u8 xec_mask	 = 0xf;
 
 static bool report_gart_errors;
-static void (*nb_bus_decoder)(int node_id, struct mce *m);
+static void (*decode_dram_ecc)(int node_id, struct mce *m);
 
 void amd_report_gart_errors(bool v)
 {
@@ -18,16 +18,16 @@ EXPORT_SYMBOL_GPL(amd_report_gart_errors);
 
 void amd_register_ecc_decoder(void (*f)(int, struct mce *))
 {
-	nb_bus_decoder = f;
+	decode_dram_ecc = f;
 }
 EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
 
 void amd_unregister_ecc_decoder(void (*f)(int, struct mce *))
 {
-	if (nb_bus_decoder) {
-		WARN_ON(nb_bus_decoder != f);
+	if (decode_dram_ecc) {
+		WARN_ON(decode_dram_ecc != f);
 
-		nb_bus_decoder = NULL;
+		decode_dram_ecc = NULL;
 	}
 }
 EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
@@ -763,8 +763,8 @@ static void decode_mc4_mce(struct mce *m)
 
 			pr_cont("%s.\n", mc4_mce_desc[xec]);
 
-			if (nb_bus_decoder)
-				nb_bus_decoder(node_id, m);
+			if (decode_dram_ecc)
+				decode_dram_ecc(node_id, m);
 			return;
 		}
 		break;
@@ -877,6 +877,13 @@ static void decode_smca_errors(struct mce *m)
 		pr_emerg(HW_ERR "%s Error: ", ip_name);
 		pr_cont("%s.\n", smca_mce_descs[bank_type].descs[xec]);
 	}
+
+	/*
+	 * amd_get_nb_id() returns the last level cache id.
+	 * The last level cache on Fam17h is 1 level below the node.
+	 */
+	if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc)
+		decode_dram_ecc(amd_get_nb_id(m->extcpu) >> 1, m);
 }
 
 static inline void amd_decode_err_code(u16 ec)
@@ -957,10 +964,13 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
 		((m->status & MCI_STATUS_PCC)	? "PCC"	  : "-"),
 		((m->status & MCI_STATUS_ADDRV)	? "AddrV" : "-"));
 
-	if (c->x86 >= 0x15)
-		pr_cont("|%s|%s",
-			((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
-			((m->status & MCI_STATUS_POISON)   ? "Poison"   : "-"));
+	if (c->x86 >= 0x15) {
+		pr_cont("|%s", (m->status & MCI_STATUS_DEFERRED ? "Deferred" : "-"));
+
+		/* F15h, bank4, bit 43 is part of McaStatSubCache. */
+		if (c->x86 != 0x15 || m->bank != 4)
+			pr_cont("|%s", (m->status & MCI_STATUS_POISON ? "Poison" : "-"));
+	}
 
 	if (boot_cpu_has(X86_FEATURE_SMCA)) {
 		u32 low, high;

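The last hunk above implements the "Don't report poison bit on Fam15h, bank 4" commit from the list: bit 43 of MCi_STATUS is only a Poison flag outside F15h bank 4, where it belongs to McaStatSubCache instead. A user-space sketch of the decode, using the bit positions of the kernel's MCI_STATUS_POISON (43) and MCI_STATUS_DEFERRED (44) definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define MCI_STATUS_POISON	(1ULL << 43)
    #define MCI_STATUS_DEFERRED	(1ULL << 44)

    static void print_flags(uint8_t family, unsigned int bank, uint64_t status)
    {
    	printf("|%s", (status & MCI_STATUS_DEFERRED) ? "Deferred" : "-");

    	/* F15h, bank 4, bit 43 is part of McaStatSubCache. */
    	if (family != 0x15 || bank != 4)
    		printf("|%s", (status & MCI_STATUS_POISON) ? "Poison" : "-");

    	printf("\n");
    }

    int main(void)
    {
    	print_flags(0x17, 0, MCI_STATUS_DEFERRED | MCI_STATUS_POISON);
    	print_flags(0x15, 4, MCI_STATUS_POISON);	/* Poison column suppressed */
    	return 0;
    }
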
+ 17 - 0
drivers/edac/mpc85xx_edac.c

@@ -300,6 +300,22 @@ static int mpc85xx_pci_err_probe(struct platform_device *op)
 	return res;
 }
 
+static int mpc85xx_pci_err_remove(struct platform_device *op)
+{
+	struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
+	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+
+	edac_dbg(0, "\n");
+
+	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, orig_pci_err_cap_dr);
+	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
+
+	edac_pci_del_device(&op->dev);
+	edac_pci_free_ctl_info(pci);
+
+	return 0;
+}
+
 static const struct platform_device_id mpc85xx_pci_err_match[] = {
 	{
 		.name = "mpc85xx-pci-edac"
@@ -309,6 +325,7 @@ static const struct platform_device_id mpc85xx_pci_err_match[] = {
 
 static struct platform_driver mpc85xx_pci_err_driver = {
 	.probe = mpc85xx_pci_err_probe,
+	.remove = mpc85xx_pci_err_remove,
 	.id_table = mpc85xx_pci_err_match,
 	.driver = {
 		.name = "mpc85xx_pci_err",

+ 8 - 6
drivers/edac/sb_edac.c

@@ -23,6 +23,7 @@
 #include <linux/math64.h>
 #include <linux/mod_devicetable.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/processor.h>
 #include <asm/mce.h>
 
@@ -3365,12 +3366,13 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 	{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
 
 static const struct x86_cpu_id sbridge_cpuids[] = {
-	ICPU(0x2d, pci_dev_descr_sbridge_table),	/* SANDY_BRIDGE */
-	ICPU(0x3e, pci_dev_descr_ibridge_table),	/* IVY_BRIDGE */
-	ICPU(0x3f, pci_dev_descr_haswell_table),	/* HASWELL */
-	ICPU(0x4f, pci_dev_descr_broadwell_table),	/* BROADWELL */
-	ICPU(0x56, pci_dev_descr_broadwell_table),	/* BROADWELL-DE */
-	ICPU(0x57, pci_dev_descr_knl_table),		/* KNIGHTS_LANDING */
+	ICPU(INTEL_FAM6_SANDYBRIDGE_X,	  pci_dev_descr_sbridge_table),
+	ICPU(INTEL_FAM6_IVYBRIDGE_X,	  pci_dev_descr_ibridge_table),
+	ICPU(INTEL_FAM6_HASWELL_X,	  pci_dev_descr_haswell_table),
+	ICPU(INTEL_FAM6_BROADWELL_X,	  pci_dev_descr_broadwell_table),
+	ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
+	ICPU(INTEL_FAM6_XEON_PHI_KNL,	  pci_dev_descr_knl_table),
+	ICPU(INTEL_FAM6_XEON_PHI_KNM,	  pci_dev_descr_knl_table),
 	{ }
 };
 MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);

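KNM support needs no new PCI descriptor table; the new INTEL_FAM6_XEON_PHI_KNM row simply reuses pci_dev_descr_knl_table. At probe time, sb_edac matches the running CPU against this table roughly as in the kernel-context sketch below (mirroring what sbridge_init() does; not runnable outside a module):

    #include <linux/errno.h>
    #include <asm/cpu_device_id.h>

    static int example_init(void)
    {
    	const struct x86_cpu_id *id = x86_match_cpu(sbridge_cpuids);

    	if (!id)
    		return -ENODEV;	/* not one of the CPUs listed above */

    	/* id->driver_data points at the matching pci_dev_descr_*_table,
    	 * which drives device discovery for that platform. */
    	return 0;
    }
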
+ 4 - 3
drivers/edac/skx_edac.c

@@ -25,6 +25,7 @@
 #include <linux/math64.h>
 #include <linux/mod_devicetable.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/processor.h>
 #include <asm/mce.h>
 
@@ -262,8 +263,8 @@ static int get_all_munits(const struct munit *m)
 	return -ENODEV;
 }
 
-const struct x86_cpu_id skx_cpuids[] = {
-	{ X86_VENDOR_INTEL, 6, 0x55, 0, 0 },	/* Skylake */
+static const struct x86_cpu_id skx_cpuids[] = {
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
@@ -1036,7 +1037,7 @@ static void skx_remove(void)
  *	search for all the devices we need
  *	check which DIMMs are present.
  */
-int __init skx_init(void)
+static int __init skx_init(void)
 {
 	const struct x86_cpu_id *id;
 	const struct munit *m;

+ 3 - 3
drivers/edac/xgene_edac.c

@@ -1602,16 +1602,16 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
 		dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
 	if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
 		dev_err(edac_dev->dev,
-			"Mutilple IOB PA read data RAM error\n");
+			"Multiple IOB PA read data RAM error\n");
 	if (reg & IOBPA_WDATA_CORRUPT_MASK)
 		dev_err(edac_dev->dev, "IOB PA write data RAM error\n");
 	if (reg & IOBPA_M_WDATA_CORRUPT_MASK)
 		dev_err(edac_dev->dev,
-			"Mutilple IOB PA write data RAM error\n");
+			"Multiple IOB PA write data RAM error\n");
 	if (reg & IOBPA_TRANS_CORRUPT_MASK)
 		dev_err(edac_dev->dev, "IOB PA transaction error\n");
 	if (reg & IOBPA_M_TRANS_CORRUPT_MASK)
-		dev_err(edac_dev->dev, "Mutilple IOB PA transaction error\n");
+		dev_err(edac_dev->dev, "Multiple IOB PA transaction error\n");
 	if (reg & IOBPA_REQIDRAM_CORRUPT_MASK)
 		dev_err(edac_dev->dev, "IOB PA transaction ID RAM error\n");
 	if (reg & IOBPA_M_REQIDRAM_CORRUPT_MASK)

+ 7 - 1
include/linux/edac.h

@@ -134,6 +134,7 @@ enum dev_type {
 enum hw_event_mc_err_type {
 	HW_EVENT_ERR_CORRECTED,
 	HW_EVENT_ERR_UNCORRECTED,
+	HW_EVENT_ERR_DEFERRED,
 	HW_EVENT_ERR_FATAL,
 	HW_EVENT_ERR_INFO,
 };
@@ -145,6 +146,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
 		return "Corrected";
 	case HW_EVENT_ERR_UNCORRECTED:
 		return "Uncorrected";
+	case HW_EVENT_ERR_DEFERRED:
+		return "Deferred";
 	case HW_EVENT_ERR_FATAL:
 		return "Fatal";
 	default:
@@ -192,10 +195,11 @@ static inline char *mc_event_error_type(const unsigned int err_type)
  * @MEM_DDR3:		DDR3 RAM
  * @MEM_RDDR3:		Registered DDR3 RAM
  *			This is a variant of the DDR3 memories.
- * @MEM_LRDDR3		Load-Reduced DDR3 memory.
+ * @MEM_LRDDR3:		Load-Reduced DDR3 memory.
  * @MEM_DDR4:		Unbuffered DDR4 RAM
  * @MEM_RDDR4:		Registered DDR4 RAM
  *			This is a variant of the DDR4 memories.
+ * @MEM_LRDDR4:		Load-Reduced DDR4 memory.
  */
 enum mem_type {
 	MEM_EMPTY = 0,
@@ -218,6 +222,7 @@ enum mem_type {
 	MEM_LRDDR3,
 	MEM_DDR4,
 	MEM_RDDR4,
+	MEM_LRDDR4,
 };
 
 #define MEM_FLAG_EMPTY		BIT(MEM_EMPTY)
@@ -239,6 +244,7 @@ enum mem_type {
 #define MEM_FLAG_RDDR3          BIT(MEM_RDDR3)
 #define MEM_FLAG_DDR4           BIT(MEM_DDR4)
 #define MEM_FLAG_RDDR4          BIT(MEM_RDDR4)
+#define MEM_FLAG_LRDDR4         BIT(MEM_LRDDR4)
 
 /**
  * enum edac-type - Error Detection and Correction capabilities and mode