@@ -35,7 +35,7 @@ static LIST_HEAD(sbridge_edac_list);
 /*
  * Alter this version for the module when modifications are made
  */
-#define SBRIDGE_REVISION " Ver: 1.1.1 "
+#define SBRIDGE_REVISION " Ver: 1.1.2 "
 #define EDAC_MOD_STR "sbridge_edac"

 /*
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
  * sbridge structs
  */

-#define NUM_CHANNELS 8 /* 2MC per socket, four chan per MC */
+#define NUM_CHANNELS 4 /* Max channels per MC */
 #define MAX_DIMMS 3 /* Max DIMMS per channel */
 #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */
 #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */
@@ -294,6 +294,12 @@ enum type {
 	KNIGHTS_LANDING,
 };

+enum domain {
+	IMC0 = 0,
+	IMC1,
+	SOCK,
+};
+
 struct sbridge_pvt;
 struct sbridge_info {
 	enum type type;
@@ -324,11 +330,14 @@ struct sbridge_channel {
 struct pci_id_descr {
 	int dev_id;
 	int optional;
+	enum domain dom;
 };

 struct pci_id_table {
 	const struct pci_id_descr *descr;
-	int n_devs;
+	int n_devs_per_imc;
+	int n_devs_per_sock;
+	int n_imcs_per_sock;
 	enum type type;
 };

@@ -337,7 +346,9 @@ struct sbridge_dev {
 	u8 bus, mc;
 	u8 node_id, source_id;
 	struct pci_dev **pdev;
+	enum domain dom;
 	int n_devs;
+	int i_devs;
 	struct mem_ctl_info *mci;
 };

@@ -352,11 +363,12 @@ struct knl_pvt {
 };

 struct sbridge_pvt {
-	struct pci_dev *pci_ta, *pci_ddrio, *pci_ras;
+	/* Devices per socket */
+	struct pci_dev *pci_ddrio;
 	struct pci_dev *pci_sad0, *pci_sad1;
-	struct pci_dev *pci_ha0, *pci_ha1;
 	struct pci_dev *pci_br0, *pci_br1;
-	struct pci_dev *pci_ha1_ta;
+	/* Devices per memory controller */
+	struct pci_dev *pci_ha, *pci_ta, *pci_ras;
 	struct pci_dev *pci_tad[NUM_CHANNELS];

 	struct sbridge_dev *sbridge_dev;
@@ -373,39 +385,42 @@ struct sbridge_pvt {
 	struct knl_pvt knl;
 };

-#define PCI_DESCR(device_id, opt) \
+#define PCI_DESCR(device_id, opt, domain) \
 	.dev_id = (device_id), \
-	.optional = opt
+	.optional = opt, \
+	.dom = domain

 static const struct pci_id_descr pci_dev_descr_sbridge[] = {
 	/* Processor Home Agent */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0, IMC0) },

 	/* Memory controller */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },

 	/* System Address Decoder */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0, SOCK) },

 	/* Broadcast Registers */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0, SOCK) },
 };

-#define PCI_ID_TABLE_ENTRY(A, T) { \
+#define PCI_ID_TABLE_ENTRY(A, N, M, T) { \
 	.descr = A, \
-	.n_devs = ARRAY_SIZE(A), \
+	.n_devs_per_imc = N, \
+	.n_devs_per_sock = ARRAY_SIZE(A), \
+	.n_imcs_per_sock = M, \
 	.type = T \
 }

 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
 	{0,}			/* 0 terminated list. */
 };

@@ -439,40 +454,39 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {

 static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 	/* Processor Home Agent */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) },

 	/* Memory controller */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) },
+
+	/* Optional, mode 2HA */
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1, IMC1) },
+
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },

 	/* System Address Decoder */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0, SOCK) },

 	/* Broadcast Registers */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0, SOCK) },

-	/* Optional, mode 2HA */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) },
-#if 0
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
-#endif
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) },
 };

 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
 	{0,}			/* 0 terminated list. */
 };

@@ -498,9 +512,9 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8
-#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM 0x2f71
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68
-#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM 0x2f79
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
@@ -517,35 +531,33 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
 static const struct pci_id_descr pci_dev_descr_haswell[] = {
 	/* first item must be the HA */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1, IMC1) },
+
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
+
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
+
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1, SOCK) },
 };

 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
 	{0,}			/* 0 terminated list. */
 };

@@ -559,7 +571,7 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840
 /* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
-#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL 0x7843
+#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN 0x7843
 /* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
 #define PCI_DEVICE_ID_INTEL_KNL_IMC_TA 0x7844
 /* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
@@ -579,17 +591,17 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
  */

 static const struct pci_id_descr pci_dev_descr_knl[] = {
-	[0]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0) },
-	[1]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0) },
-	[2 ... 3]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0)},
-	[4 ... 41]  = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0) },
-	[42 ... 47] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL, 0) },
-	[48]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0) },
-	[49]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0) },
+	[0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0, IMC0)},
+	[2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN, 0, IMC0) },
+	[8]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0, IMC0) },
+	[9]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
+	[10]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0, SOCK) },
+	[11]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0, SOCK) },
+	[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0, SOCK) },
 };

 static const struct pci_id_table pci_dev_descr_knl_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
 	{0,}
 };

@@ -615,9 +627,9 @@ static const struct pci_id_table pci_dev_descr_knl_table[] = {
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1 0x6f60
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8
-#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL 0x6f71
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM 0x6f71
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA 0x6f68
-#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL 0x6f79
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM 0x6f79
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
@@ -632,32 +644,30 @@ static const struct pci_id_table pci_dev_descr_knl_table[] = {

 static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 	/* first item must be the HA */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1) },
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1) },
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1, IMC1) },
+
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
+
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
+
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1, SOCK) },
 };

 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
 	{0,}			/* 0 terminated list. */
 };

@@ -709,7 +719,8 @@ static inline int numcol(u32 mtr)
 	return 1 << cols;
 }

-static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus)
+static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus,
+					   struct sbridge_dev *prev)
 {
 	struct sbridge_dev *sbridge_dev;

@@ -722,16 +733,19 @@ static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus)
 				struct sbridge_dev, list);
 	}

-	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
-		if (sbridge_dev->bus == bus)
+	sbridge_dev = list_entry(prev ? prev->list.next
+				      : sbridge_edac_list.next, struct sbridge_dev, list);
+
+	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
+		if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom))
 			return sbridge_dev;
 	}

 	return NULL;
 }

-static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
-					     const struct pci_id_table *table)
+static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
+					     const struct pci_id_table *table)
 {
 	struct sbridge_dev *sbridge_dev;

@@ -739,15 +753,17 @@ static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
 	if (!sbridge_dev)
 		return NULL;

-	sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
-				    GFP_KERNEL);
+	sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
+				    sizeof(*sbridge_dev->pdev),
+				    GFP_KERNEL);
 	if (!sbridge_dev->pdev) {
 		kfree(sbridge_dev);
 		return NULL;
 	}

 	sbridge_dev->bus = bus;
-	sbridge_dev->n_devs = table->n_devs;
+	sbridge_dev->dom = dom;
+	sbridge_dev->n_devs = table->n_devs_per_imc;
 	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);

 	return sbridge_dev;
@@ -1044,79 +1060,6 @@ static int haswell_chan_hash(int idx, u64 addr)
 	return idx;
 }

-/****************************************************************************
-			Memory check routines
- ****************************************************************************/
-static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id)
-{
-	struct pci_dev *pdev = NULL;
-
-	do {
-		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev);
-		if (pdev && pdev->bus->number == bus)
-			break;
-	} while (pdev);
-
-	return pdev;
-}
-
-/**
- * check_if_ecc_is_active() - Checks if ECC is active
- * @bus: Device bus
- * @type: Memory controller type
- * returns: 0 in case ECC is active, -ENODEV if it can't be determined or
- *	    disabled
- */
-static int check_if_ecc_is_active(const u8 bus, enum type type)
-{
-	struct pci_dev *pdev = NULL;
-	u32 mcmtr, id;
-
-	switch (type) {
-	case IVY_BRIDGE:
-		id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
-		break;
-	case HASWELL:
-		id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
-		break;
-	case SANDY_BRIDGE:
-		id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
-		break;
-	case BROADWELL:
-		id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA;
-		break;
-	case KNIGHTS_LANDING:
-		/*
-		 * KNL doesn't group things by bus the same way
-		 * SB/IB/Haswell does.
-		 */
-		id = PCI_DEVICE_ID_INTEL_KNL_IMC_TA;
-		break;
-	default:
-		return -ENODEV;
-	}
-
-	if (type != KNIGHTS_LANDING)
-		pdev = get_pdev_same_bus(bus, id);
-	else
-		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, 0);
-
-	if (!pdev) {
-		sbridge_printk(KERN_ERR, "Couldn't find PCI device "
-				"%04x:%04x! on bus %02d\n",
-				PCI_VENDOR_ID_INTEL, id, bus);
-		return -ENODEV;
-	}
-
-	pci_read_config_dword(pdev,
-			type == KNIGHTS_LANDING ? KNL_MCMTR : MCMTR, &mcmtr);
-	if (!IS_ECC_ENABLED(mcmtr)) {
-		sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
-		return -ENODEV;
-	}
-	return 0;
-}
-
 /* Low bits of TAD limit, and some metadata. */
 static const u32 knl_tad_dram_limit_lo[] = {
 	0x400, 0x500, 0x600, 0x700,
@@ -1587,25 +1530,13 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
 	return 0;
 }

-static int get_dimm_config(struct mem_ctl_info *mci)
+static void get_source_id(struct mem_ctl_info *mci)
 {
 	struct sbridge_pvt *pvt = mci->pvt_info;
-	struct dimm_info *dimm;
-	unsigned i, j, banks, ranks, rows, cols, npages;
-	u64 size;
 	u32 reg;
-	enum edac_type mode;
-	enum mem_type mtype;
-	int channels = pvt->info.type == KNIGHTS_LANDING ?
-		KNL_MAX_CHANNELS : NUM_CHANNELS;
-	u64 knl_mc_sizes[KNL_MAX_CHANNELS];

-	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
-		pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
-		pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
-	}
 	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
-			pvt->info.type == KNIGHTS_LANDING)
+	    pvt->info.type == KNIGHTS_LANDING)
 		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
 	else
 		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
@@ -1614,50 +1545,19 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
 	else
 		pvt->sbridge_dev->source_id = SOURCE_ID(reg);
+}

-	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
-	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
-		 pvt->sbridge_dev->mc,
-		 pvt->sbridge_dev->node_id,
-		 pvt->sbridge_dev->source_id);
-
-	/* KNL doesn't support mirroring or lockstep,
-	 * and is always closed page
-	 */
-	if (pvt->info.type == KNIGHTS_LANDING) {
-		mode = EDAC_S4ECD4ED;
-		pvt->is_mirrored = false;
-
-		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
-			return -1;
-	} else {
-		pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
-		if (IS_MIRROR_ENABLED(reg)) {
-			edac_dbg(0, "Memory mirror is enabled\n");
-			pvt->is_mirrored = true;
-		} else {
-			edac_dbg(0, "Memory mirror is disabled\n");
-			pvt->is_mirrored = false;
-		}
-
-		pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
-		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
-			edac_dbg(0, "Lockstep is enabled\n");
-			mode = EDAC_S8ECD8ED;
-			pvt->is_lockstep = true;
-		} else {
-			edac_dbg(0, "Lockstep is disabled\n");
-			mode = EDAC_S4ECD4ED;
-			pvt->is_lockstep = false;
-		}
-		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
-			edac_dbg(0, "address map is on closed page mode\n");
-			pvt->is_close_pg = true;
-		} else {
-			edac_dbg(0, "address map is on open page mode\n");
-			pvt->is_close_pg = false;
-		}
-	}
+static int __populate_dimms(struct mem_ctl_info *mci,
+			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
+			    enum edac_type mode)
+{
+	struct sbridge_pvt *pvt = mci->pvt_info;
+	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
+							 : NUM_CHANNELS;
+	unsigned int i, j, banks, ranks, rows, cols, npages;
+	struct dimm_info *dimm;
+	enum mem_type mtype;
+	u64 size;

 	mtype = pvt->info.get_memory_type(pvt);
 	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
@@ -1688,8 +1588,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 		}

 		for (j = 0; j < max_dimms_per_channel; j++) {
-			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
-					     i, j, 0);
+			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
 			if (pvt->info.type == KNIGHTS_LANDING) {
 				pci_read_config_dword(pvt->knl.pci_channel[i],
 					knl_mtr_reg, &mtr);
@@ -1699,6 +1598,12 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 			}
 			edac_dbg(4, "Channel #%d  MTR%d = %x\n", i, j, mtr);
 			if (IS_DIMM_PRESENT(mtr)) {
+				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
+					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
+						       pvt->sbridge_dev->source_id,
+						       pvt->sbridge_dev->dom, i);
+					return -ENODEV;
+				}
 				pvt->channel[i].dimms++;

 				ranks = numrank(pvt->info.type, mtr);
@@ -1717,7 +1622,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 				npages = MiB_TO_PAGES(size);

 				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
-					 pvt->sbridge_dev->mc, i/4, i%4, j,
+					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
 					 size, npages,
 					 banks, ranks, rows, cols);

@@ -1727,8 +1632,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 				dimm->mtype = mtype;
 				dimm->edac_mode = mode;
 				snprintf(dimm->label, sizeof(dimm->label),
-						 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
-						 pvt->sbridge_dev->source_id, i/4, i%4, j);
+					 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
+					 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
 			}
 		}
 	}
@@ -1736,6 +1641,65 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 	return 0;
 }

+static int get_dimm_config(struct mem_ctl_info *mci)
+{
+	struct sbridge_pvt *pvt = mci->pvt_info;
+	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
+	enum edac_type mode;
+	u32 reg;
+
+	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+		pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
+		pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+	}
+	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
+	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
+		 pvt->sbridge_dev->mc,
+		 pvt->sbridge_dev->node_id,
+		 pvt->sbridge_dev->source_id);
+
+	/* KNL doesn't support mirroring or lockstep,
+	 * and is always closed page
+	 */
+	if (pvt->info.type == KNIGHTS_LANDING) {
+		mode = EDAC_S4ECD4ED;
+		pvt->is_mirrored = false;
+
+		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
+			return -1;
+		pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr);
+	} else {
+		pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
+		if (IS_MIRROR_ENABLED(reg)) {
+			edac_dbg(0, "Memory mirror is enabled\n");
+			pvt->is_mirrored = true;
+		} else {
+			edac_dbg(0, "Memory mirror is disabled\n");
+			pvt->is_mirrored = false;
+		}
+
+		pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
+		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
+			edac_dbg(0, "Lockstep is enabled\n");
+			mode = EDAC_S8ECD8ED;
+			pvt->is_lockstep = true;
+		} else {
+			edac_dbg(0, "Lockstep is disabled\n");
+			mode = EDAC_S4ECD4ED;
+			pvt->is_lockstep = false;
+		}
+		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
+			edac_dbg(0, "address map is on closed page mode\n");
+			pvt->is_close_pg = true;
+		} else {
+			edac_dbg(0, "address map is on open page mode\n");
+			pvt->is_close_pg = false;
+		}
+	}
+
+	return __populate_dimms(mci, knl_mc_sizes, mode);
+}
+
 static void get_memory_layout(const struct mem_ctl_info *mci)
 {
 	struct sbridge_pvt *pvt = mci->pvt_info;
@@ -1816,8 +1780,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
 	 */
 	prv = 0;
 	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
-		pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
-				      &reg);
+		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
 		limit = TAD_LIMIT(reg);
 		if (limit <= prv)
 			break;
@@ -1899,12 +1862,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
 	}
 }

-static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
+static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
 {
 	struct sbridge_dev *sbridge_dev;

 	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
-		if (sbridge_dev->node_id == node_id)
+		if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
 			return sbridge_dev->mci;
 	}
 	return NULL;
@@ -1925,7 +1888,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	int interleave_mode, shiftup = 0;
 	unsigned sad_interleave[pvt->info.max_interleave];
 	u32 reg, dram_rule;
-	u8 ch_way, sck_way, pkg, sad_ha = 0, ch_add = 0;
+	u8 ch_way, sck_way, pkg, sad_ha = 0;
 	u32 tad_offset;
 	u32 rir_way;
 	u32 mb, gb;
@@ -2038,13 +2001,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
 		*socket = sad_pkg_socket(pkg);
 		sad_ha = sad_pkg_ha(pkg);
-		if (sad_ha)
-			ch_add = 4;

 		if (a7mode) {
 			/* MCChanShiftUpEnable */
-			pci_read_config_dword(pvt->pci_ha0,
-					      HASWELL_HASYSDEFEATURE2, &reg);
+			pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
 			shiftup = GET_BITFIELD(reg, 22, 22);
 		}

@@ -2056,8 +2016,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
 		*socket = sad_pkg_socket(pkg);
 		sad_ha = sad_pkg_ha(pkg);
-		if (sad_ha)
-			ch_add = 4;
 		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
 			 idx, *socket, sad_ha);
 	}
@@ -2068,7 +2026,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	 * Move to the proper node structure, in order to access the
 	 * right PCI registers
 	 */
-	new_mci = get_mci_for_node_id(*socket);
+	new_mci = get_mci_for_node_id(*socket, sad_ha);
 	if (!new_mci) {
 		sprintf(msg, "Struct for socket #%u wasn't initialized",
 			*socket);
@@ -2081,14 +2039,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	 * Step 2) Get memory channel
 	 */
 	prv = 0;
-	if (pvt->info.type == SANDY_BRIDGE)
-		pci_ha = pvt->pci_ha0;
-	else {
-		if (sad_ha)
-			pci_ha = pvt->pci_ha1;
-		else
-			pci_ha = pvt->pci_ha0;
-	}
+	pci_ha = pvt->pci_ha;
 	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
 		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
 		limit = TAD_LIMIT(reg);
@@ -2139,9 +2090,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	}
 	*channel_mask = 1 << base_ch;

-	pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
-			      tad_ch_nilv_offset[n_tads],
-			      &tad_offset);
+	pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);

 	if (pvt->is_mirrored) {
 		*channel_mask |= 1 << ((base_ch + 2) % 4);
@@ -2192,9 +2141,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	 * Step 3) Decode rank
 	 */
 	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
-		pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
-				      rir_way_limit[n_rir],
-				      &reg);
+		pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);

 		if (!IS_RIR_VALID(reg))
 			continue;
@@ -2222,9 +2169,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */
 	idx %= 1 << rir_way;

-	pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
-			      rir_offset[n_rir][idx],
-			      &reg);
+	pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
 	*rank = RIR_RNK_TGT(pvt->info.type, reg);

 	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
@@ -2277,10 +2222,11 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
 				 const unsigned devno,
 				 const int multi_bus)
 {
-	struct sbridge_dev *sbridge_dev;
+	struct sbridge_dev *sbridge_dev = NULL;
 	const struct pci_id_descr *dev_descr = &table->descr[devno];
 	struct pci_dev *pdev = NULL;
 	u8 bus = 0;
+	int i = 0;

 	sbridge_printk(KERN_DEBUG,
 		"Seeking for: PCI ID %04x:%04x\n",
@@ -2311,9 +2257,14 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
 	}
 	bus = pdev->bus->number;

-	sbridge_dev = get_sbridge_dev(bus, multi_bus);
+next_imc:
+	sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
 	if (!sbridge_dev) {
-		sbridge_dev = alloc_sbridge_dev(bus, table);
+
+		if (dev_descr->dom == SOCK)
+			goto out_imc;
+
+		sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table);
 		if (!sbridge_dev) {
 			pci_dev_put(pdev);
 			return -ENOMEM;
@@ -2321,7 +2272,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
 		(*num_mc)++;
 	}

-	if (sbridge_dev->pdev[devno]) {
+	if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
 		sbridge_printk(KERN_ERR,
 			"Duplicated device for %04x:%04x\n",
 			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
@@ -2329,8 +2280,16 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
 		return -ENODEV;
 	}

-	sbridge_dev->pdev[devno] = pdev;
+	sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
+
+	/* pdev belongs to more than one IMC, do extra gets */
+	if (++i > 1)
+		pci_dev_get(pdev);

+	if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
+		goto next_imc;
+
+out_imc:
 	/* Be sure that the device is enabled */
 	if (unlikely(pci_enable_device(pdev) < 0)) {
 		sbridge_printk(KERN_ERR,
@@ -2374,7 +2333,7 @@ static int sbridge_get_all_devices(u8 *num_mc,
 	if (table->type == KNIGHTS_LANDING)
 		allow_dups = multi_bus = 1;
 	while (table && table->descr) {
-		for (i = 0; i < table->n_devs; i++) {
+		for (i = 0; i < table->n_devs_per_sock; i++) {
 			if (!allow_dups || i == 0 ||
 					table->descr[i].dev_id !=
 					table->descr[i-1].dev_id) {
@@ -2385,7 +2344,7 @@ static int sbridge_get_all_devices(u8 *num_mc,
 					table, i, multi_bus);
 			if (rc < 0) {
 				if (i == 0) {
-					i = table->n_devs;
+					i = table->n_devs_per_sock;
 					break;
 				}
 				sbridge_put_all_devices();
@@ -2399,6 +2358,13 @@ static int sbridge_get_all_devices(u8 *num_mc,
 	return 0;
 }

+/*
+ * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
+ * the format: XXXa. So we can convert from a device to the corresponding
+ * channel like this
+ */
+#define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
+
 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
 				 struct sbridge_dev *sbridge_dev)
 {
@@ -2423,7 +2389,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
 			pvt->pci_br0 = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
-			pvt->pci_ha0 = pdev;
+			pvt->pci_ha = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
 			pvt->pci_ta = pdev;
@@ -2436,7 +2402,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
 		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
+			int id = TAD_DEV_TO_CHAN(pdev->device);
 			pvt->pci_tad[id] = pdev;
 			saw_chan_mask |= 1 << id;
 		}
@@ -2455,7 +2421,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
 	}

 	/* Check if everything were registered */
-	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
+	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
 	    !pvt->pci_ras || !pvt->pci_ta)
 		goto enodev;

@@ -2488,19 +2454,26 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,

 		switch (pdev->device) {
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
-			pvt->pci_ha0 = pdev;
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
+			pvt->pci_ha = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
 			pvt->pci_ta = pdev;
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
 			pvt->pci_ras = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
 		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0;
+			int id = TAD_DEV_TO_CHAN(pdev->device);
 			pvt->pci_tad[id] = pdev;
 			saw_chan_mask |= 1 << id;
 		}
@@ -2520,19 +2493,6 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
 			pvt->pci_br1 = pdev;
 			break;
-		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
-			pvt->pci_ha1 = pdev;
-			break;
-		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
-		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
-		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
-		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
-		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 4;
-			pvt->pci_tad[id] = pdev;
-			saw_chan_mask |= 1 << id;
-		}
-			break;
 		default:
 			goto error;
 		}
@@ -2544,13 +2504,12 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
 	}

 	/* Check if everything were registered */
-	if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
+	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
 	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
 		goto enodev;

-	if (saw_chan_mask != 0x0f && /* -EN */
-	    saw_chan_mask != 0x33 && /* -EP */
-	    saw_chan_mask != 0xff)   /* -EX */
+	if (saw_chan_mask != 0x0f && /* -EN/-EX */
+	    saw_chan_mask != 0x03)   /* -EP */
 		goto enodev;
 	return 0;

@@ -2593,32 +2552,27 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
 			pvt->pci_sad1 = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
-			pvt->pci_ha0 = pdev;
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
+			pvt->pci_ha = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
 			pvt->pci_ta = pdev;
 			break;
-		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL:
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
 			pvt->pci_ras = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
-		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0;
-
-			pvt->pci_tad[id] = pdev;
-			saw_chan_mask |= 1 << id;
-		}
-			break;
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
 		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 + 4;
-
+			int id = TAD_DEV_TO_CHAN(pdev->device);
 			pvt->pci_tad[id] = pdev;
 			saw_chan_mask |= 1 << id;
 		}
@@ -2630,12 +2584,6 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
 			if (!pvt->pci_ddrio)
 				pvt->pci_ddrio = pdev;
 			break;
-		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
-			pvt->pci_ha1 = pdev;
-			break;
-		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
-			pvt->pci_ha1_ta = pdev;
-			break;
 		default:
 			break;
 		}
@@ -2647,13 +2595,12 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
 	}

 	/* Check if everything were registered */
-	if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
+	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
 	    !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
 		goto enodev;

-	if (saw_chan_mask != 0x0f && /* -EN */
-	    saw_chan_mask != 0x33 && /* -EP */
-	    saw_chan_mask != 0xff)   /* -EX */
+	if (saw_chan_mask != 0x0f && /* -EN/-EX */
+	    saw_chan_mask != 0x03)   /* -EP */
 		goto enodev;
 	return 0;

@@ -2690,30 +2637,27 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
 			pvt->pci_sad1 = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
-			pvt->pci_ha0 = pdev;
+		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
+			pvt->pci_ha = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
+		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
 			pvt->pci_ta = pdev;
 			break;
-		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL:
+		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
+		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
 			pvt->pci_ras = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
-		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0;
-			pvt->pci_tad[id] = pdev;
-			saw_chan_mask |= 1 << id;
-		}
-			break;
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
 		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 + 4;
+			int id = TAD_DEV_TO_CHAN(pdev->device);
 			pvt->pci_tad[id] = pdev;
 			saw_chan_mask |= 1 << id;
 		}
@@ -2721,12 +2665,6 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
 			pvt->pci_ddrio = pdev;
 			break;
-		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
-			pvt->pci_ha1 = pdev;
-			break;
-		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
-			pvt->pci_ha1_ta = pdev;
-			break;
 		default:
 			break;
 		}
@@ -2738,13 +2676,12 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
 	}

 	/* Check if everything were registered */
-	if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
+	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
 	    !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
 		goto enodev;

-	if (saw_chan_mask != 0x0f && /* -EN */
-	    saw_chan_mask != 0x33 && /* -EP */
-	    saw_chan_mask != 0xff)   /* -EX */
+	if (saw_chan_mask != 0x0f && /* -EN/-EX */
+	    saw_chan_mask != 0x03)   /* -EP */
 		goto enodev;
 	return 0;

@@ -2812,7 +2749,7 @@ static int knl_mci_bind_devs(struct mem_ctl_info *mci,
 			pvt->knl.pci_cha[devidx] = pdev;
 			break;

-		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL:
+		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
 			devidx = -1;

 			/*
@@ -3006,7 +2943,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,

 	if (rc < 0)
 		goto err_parsing;
-	new_mci = get_mci_for_node_id(socket);
+	new_mci = get_mci_for_node_id(socket, ha);
 	if (!new_mci) {
 		strcpy(msg, "Error: socket got corrupted!");
 		goto err_parsing;
@@ -3053,7 +2990,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
 	/* Call the helper to output message */
 	edac_mc_handle_error(tp_event, mci, core_err_cnt,
 			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
-			     4*ha+channel, dimm, -1,
+			     channel, dimm, -1,
 			     optype, msg);
 	return;
 err_parsing:
@@ -3078,7 +3015,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
 	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
 		return NOTIFY_DONE;

-	mci = get_mci_for_node_id(mce->socketid);
+	mci = get_mci_for_node_id(mce->socketid, IMC0);
 	if (!mci)
 		return NOTIFY_DONE;
 	pvt = mci->pvt_info;
@@ -3159,11 +3096,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 	struct pci_dev *pdev = sbridge_dev->pdev[0];
 	int rc;

-	/* Check the number of active and not disabled channels */
-	rc = check_if_ecc_is_active(sbridge_dev->bus, type);
-	if (unlikely(rc < 0))
-		return rc;
-
 	/* allocate a new MC control structure */
 	layers[0].type = EDAC_MC_LAYER_CHANNEL;
 	layers[0].size = type == KNIGHTS_LANDING ?
@@ -3192,7 +3124,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
 	mci->edac_ctl_cap = EDAC_FLAG_NONE;
 	mci->edac_cap = EDAC_FLAG_NONE;
-	mci->mod_name = "sbridge_edac.c";
+	mci->mod_name = "sb_edac.c";
 	mci->mod_ver = SBRIDGE_REVISION;
 	mci->dev_name = pci_name(pdev);
 	mci->ctl_page_to_phys = NULL;
@@ -3215,12 +3147,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
 		pvt->info.get_width = ibridge_get_width;
-		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);

 		/* Store pci devices at mci for faster access */
 		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
 		if (unlikely(rc < 0))
 			goto fail0;
+		get_source_id(mci);
+		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
+			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
 		break;
 	case SANDY_BRIDGE:
 		pvt->info.rankcfgr = SB_RANK_CFG_A;
@@ -3238,12 +3172,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
 		pvt->info.interleave_pkg = sbridge_interleave_pkg;
 		pvt->info.get_width = sbridge_get_width;
-		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);

 		/* Store pci devices at mci for faster access */
 		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
 		if (unlikely(rc < 0))
 			goto fail0;
+		get_source_id(mci);
+		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
+			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
 		break;
 	case HASWELL:
 		/* rankcfgr isn't used */
@@ -3261,12 +3197,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
 		pvt->info.get_width = ibridge_get_width;
-		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);

 		/* Store pci devices at mci for faster access */
 		rc = haswell_mci_bind_devs(mci, sbridge_dev);
 		if (unlikely(rc < 0))
 			goto fail0;
+		get_source_id(mci);
+		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
+			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
 		break;
 	case BROADWELL:
 		/* rankcfgr isn't used */
@@ -3284,12 +3222,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
 		pvt->info.get_width = broadwell_get_width;
-		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx);

 		/* Store pci devices at mci for faster access */
 		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
 		if (unlikely(rc < 0))
 			goto fail0;
+		get_source_id(mci);
+		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
+			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
 		break;
 	case KNIGHTS_LANDING:
 		/* pvt->info.rankcfgr == ??? */
@@ -3307,17 +3247,22 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
 		pvt->info.get_width = knl_get_width;
-		mci->ctl_name = kasprintf(GFP_KERNEL,
-			"Knights Landing Socket#%d", mci->mc_idx);

 		rc = knl_mci_bind_devs(mci, sbridge_dev);
 		if (unlikely(rc < 0))
 			goto fail0;
+		get_source_id(mci);
+		mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
+			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
 		break;
 	}

 	/* Get dimm basic config and the memory layout */
-	get_dimm_config(mci);
+	rc = get_dimm_config(mci);
+	if (rc < 0) {
+		edac_dbg(0, "MC: failed to get_dimm_config()\n");
+		goto fail;
+	}
 	get_memory_layout(mci);

 	/* record ptr to the generic device */
@@ -3327,13 +3272,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 	if (unlikely(edac_mc_add_mc(mci))) {
 		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
 		rc = -EINVAL;
-		goto fail0;
+		goto fail;
 	}

 	return 0;

-fail0:
+fail:
 	kfree(mci->ctl_name);
+fail0:
 	edac_mc_free(mci);
 	sbridge_dev->mci = NULL;
 	return rc;