mpc85xx_edac.c

  1. /*
  2. * Freescale MPC85xx Memory Controller kernel module
  3. *
  4. * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
  5. *
  6. * Author: Dave Jiang <djiang@mvista.com>
  7. *
  8. * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
  9. * the terms of the GNU General Public License version 2. This program
  10. * is licensed "as is" without any warranty of any kind, whether express
  11. * or implied.
  12. *
  13. */
  14. #include <linux/module.h>
  15. #include <linux/init.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/ctype.h>
  18. #include <linux/io.h>
  19. #include <linux/mod_devicetable.h>
  20. #include <linux/edac.h>
  21. #include <linux/smp.h>
  22. #include <linux/gfp.h>
  23. #include <linux/of_platform.h>
  24. #include <linux/of_device.h>
  25. #include "edac_module.h"
  26. #include "edac_core.h"
  27. #include "mpc85xx_edac.h"
  28. static int edac_dev_idx;
  29. #ifdef CONFIG_PCI
  30. static int edac_pci_idx;
  31. #endif
  32. static int edac_mc_idx;
  33. static u32 orig_ddr_err_disable;
  34. static u32 orig_ddr_err_sbe;
  35. /*
  36. * PCI Err defines
  37. */
  38. #ifdef CONFIG_PCI
  39. static u32 orig_pci_err_cap_dr;
  40. static u32 orig_pci_err_en;
  41. #endif
  42. static u32 orig_l2_err_disable;
  43. #ifdef CONFIG_FSL_SOC_BOOKE
  44. static u32 orig_hid1[2];
  45. #endif
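/*
 * The orig_* variables above record the error-control register values (and,
 * for orig_hid1, the per-CPU HID1 contents) as they were found at probe time,
 * before this driver reprograms them, so that the matching teardown paths can
 * restore them later.
 */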
  46. /************************ MC SYSFS parts ***********************************/
  47. #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
  48. static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
  49. struct device_attribute *mattr,
  50. char *data)
  51. {
  52. struct mem_ctl_info *mci = to_mci(dev);
  53. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  54. return sprintf(data, "0x%08x",
  55. in_be32(pdata->mc_vbase +
  56. MPC85XX_MC_DATA_ERR_INJECT_HI));
  57. }
  58. static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
  59. struct device_attribute *mattr,
  60. char *data)
  61. {
  62. struct mem_ctl_info *mci = to_mci(dev);
  63. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  64. return sprintf(data, "0x%08x",
  65. in_be32(pdata->mc_vbase +
  66. MPC85XX_MC_DATA_ERR_INJECT_LO));
  67. }
  68. static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
  69. struct device_attribute *mattr,
  70. char *data)
  71. {
  72. struct mem_ctl_info *mci = to_mci(dev);
  73. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  74. return sprintf(data, "0x%08x",
  75. in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
  76. }
  77. static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
  78. struct device_attribute *mattr,
  79. const char *data, size_t count)
  80. {
  81. struct mem_ctl_info *mci = to_mci(dev);
  82. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  83. if (isdigit(*data)) {
  84. out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
  85. simple_strtoul(data, NULL, 0));
  86. return count;
  87. }
  88. return 0;
  89. }
  90. static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
  91. struct device_attribute *mattr,
  92. const char *data, size_t count)
  93. {
  94. struct mem_ctl_info *mci = to_mci(dev);
  95. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  96. if (isdigit(*data)) {
  97. out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
  98. simple_strtoul(data, NULL, 0));
  99. return count;
  100. }
  101. return 0;
  102. }
  103. static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
  104. struct device_attribute *mattr,
  105. const char *data, size_t count)
  106. {
  107. struct mem_ctl_info *mci = to_mci(dev);
  108. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  109. if (isdigit(*data)) {
  110. out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
  111. simple_strtoul(data, NULL, 0));
  112. return count;
  113. }
  114. return 0;
  115. }
  116. DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
  117. mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
  118. DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
  119. mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
  120. DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
  121. mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);
  122. static struct attribute *mpc85xx_dev_attrs[] = {
  123. &dev_attr_inject_data_hi.attr,
  124. &dev_attr_inject_data_lo.attr,
  125. &dev_attr_inject_ctrl.attr,
  126. NULL
  127. };
  128. ATTRIBUTE_GROUPS(mpc85xx_dev);
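/*
 * Usage sketch, assuming the default EDAC sysfs layout (the exact directory,
 * e.g. /sys/devices/system/edac/mc/mc0/, is an assumption and depends on the
 * EDAC core version): writes must start with a digit and are parsed with
 * simple_strtoul(..., 0), so decimal and 0x-prefixed hex both work, e.g.
 *
 *   echo 0x00000001 > inject_data_lo
 *   echo 1          > inject_ctrl
 *
 * The values are written straight into the DDR controller's error-injection
 * registers; see the MPC85xx reference manual for the bit layouts.
 */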
  129. /**************************** PCI Err device ***************************/
  130. #ifdef CONFIG_PCI
  131. static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
  132. {
  133. struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
  134. u32 err_detect;
  135. err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
  136. /* master aborts can happen during PCI config cycles */
  137. if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
  138. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
  139. return;
  140. }
  141. printk(KERN_ERR "PCI error(s) detected\n");
  142. printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
  143. printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
  144. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
  145. printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
  146. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
  147. printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
  148. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
  149. printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
  150. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
  151. printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
  152. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
  153. /* clear error bits */
  154. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
  155. if (err_detect & PCI_EDE_PERR_MASK)
  156. edac_pci_handle_pe(pci, pci->ctl_name);
  157. if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
  158. edac_pci_handle_npe(pci, pci->ctl_name);
  159. }
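/*
 * Reporting policy used above: parity errors (PCI_EDE_PERR_MASK) are raised
 * as EDAC "PE" events, while any other error bit, excluding the
 * multiple-error flag, is raised as a non-parity "NPE" event.
 */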
  160. static void mpc85xx_pcie_check(struct edac_pci_ctl_info *pci)
  161. {
  162. struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
  163. u32 err_detect;
  164. err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
  165. pr_err("PCIe error(s) detected\n");
  166. pr_err("PCIe ERR_DR register: 0x%08x\n", err_detect);
  167. pr_err("PCIe ERR_CAP_STAT register: 0x%08x\n",
  168. in_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR));
  169. pr_err("PCIe ERR_CAP_R0 register: 0x%08x\n",
  170. in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R0));
  171. pr_err("PCIe ERR_CAP_R1 register: 0x%08x\n",
  172. in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R1));
  173. pr_err("PCIe ERR_CAP_R2 register: 0x%08x\n",
  174. in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R2));
  175. pr_err("PCIe ERR_CAP_R3 register: 0x%08x\n",
  176. in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R3));
  177. /* clear error bits */
  178. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
  179. }
  180. static int mpc85xx_pcie_find_capability(struct device_node *np)
  181. {
  182. struct pci_controller *hose;
  183. if (!np)
  184. return -EINVAL;
  185. hose = pci_find_hose_for_OF_device(np);
  186. return early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
  187. }
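/*
 * A positive return value means the host controller exposes a PCI Express
 * capability; mpc85xx_pci_err_probe() uses this to flag the instance as PCIe
 * and to select the PCIe-specific register accesses and check routine.
 */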
  188. static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
  189. {
  190. struct edac_pci_ctl_info *pci = dev_id;
  191. struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
  192. u32 err_detect;
  193. err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
  194. if (!err_detect)
  195. return IRQ_NONE;
  196. if (pdata->is_pcie)
  197. mpc85xx_pcie_check(pci);
  198. else
  199. mpc85xx_pci_check(pci);
  200. return IRQ_HANDLED;
  201. }
  202. int mpc85xx_pci_err_probe(struct platform_device *op)
  203. {
  204. struct edac_pci_ctl_info *pci;
  205. struct mpc85xx_pci_pdata *pdata;
  206. struct resource r;
  207. int res = 0;
  208. if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
  209. return -ENOMEM;
  210. pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
  211. if (!pci)
  212. return -ENOMEM;
  213. /* make sure error reporting method is sane */
  214. switch (edac_op_state) {
  215. case EDAC_OPSTATE_POLL:
  216. case EDAC_OPSTATE_INT:
  217. break;
  218. default:
  219. edac_op_state = EDAC_OPSTATE_INT;
  220. break;
  221. }
  222. pdata = pci->pvt_info;
  223. pdata->name = "mpc85xx_pci_err";
  224. pdata->irq = NO_IRQ;
  225. if (mpc85xx_pcie_find_capability(op->dev.of_node) > 0)
  226. pdata->is_pcie = true;
  227. dev_set_drvdata(&op->dev, pci);
  228. pci->dev = &op->dev;
  229. pci->mod_name = EDAC_MOD_STR;
  230. pci->ctl_name = pdata->name;
  231. pci->dev_name = dev_name(&op->dev);
  232. if (edac_op_state == EDAC_OPSTATE_POLL) {
  233. if (pdata->is_pcie)
  234. pci->edac_check = mpc85xx_pcie_check;
  235. else
  236. pci->edac_check = mpc85xx_pci_check;
  237. }
  238. pdata->edac_idx = edac_pci_idx++;
  239. res = of_address_to_resource(op->dev.of_node, 0, &r);
  240. if (res) {
  241. printk(KERN_ERR "%s: Unable to get resource for "
  242. "PCI err regs\n", __func__);
  243. goto err;
  244. }
  245. /* we only need the error registers */
  246. r.start += 0xe00;
  247. if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
  248. pdata->name)) {
  249. printk(KERN_ERR "%s: Error while requesting mem region\n",
  250. __func__);
  251. res = -EBUSY;
  252. goto err;
  253. }
  254. pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
  255. if (!pdata->pci_vbase) {
  256. printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
  257. res = -ENOMEM;
  258. goto err;
  259. }
  260. if (pdata->is_pcie) {
  261. orig_pci_err_cap_dr =
  262. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR);
  263. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, ~0);
  264. orig_pci_err_en =
  265. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
  266. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, 0);
  267. } else {
  268. orig_pci_err_cap_dr =
  269. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
  270. /* PCI master abort is expected during config cycles */
  271. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
  272. orig_pci_err_en =
  273. in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
  274. /* disable master abort reporting */
  275. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
  276. }
  277. /* clear error bits */
  278. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
  279. if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
  280. edac_dbg(3, "failed edac_pci_add_device()\n");
  281. goto err;
  282. }
  283. if (edac_op_state == EDAC_OPSTATE_INT) {
  284. pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
  285. res = devm_request_irq(&op->dev, pdata->irq,
  286. mpc85xx_pci_isr,
  287. IRQF_SHARED,
  288. "[EDAC] PCI err", pci);
  289. if (res < 0) {
  290. printk(KERN_ERR
  291. "%s: Unable to request irq %d for "
  292. "MPC85xx PCI err\n", __func__, pdata->irq);
  293. irq_dispose_mapping(pdata->irq);
  294. res = -ENODEV;
  295. goto err2;
  296. }
  297. printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
  298. pdata->irq);
  299. }
  300. if (pdata->is_pcie) {
  301. /*
  302. * Enable all PCIe error interrupts and error detection except the
  303. * invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA access interrupt-generation
  304. * enable bit and the matching invalid-access detection enable bit.
  305. * During boot the PCIe enumeration code probes devices with accesses
  306. * that the controller flags as invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA,
  307. * which would flood the log with spurious EDAC messages, so that
  308. * particular detector is left disabled.
  309. */
  310. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0
  311. & ~PEX_ERR_ICCAIE_EN_BIT);
  312. out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, 0
  313. | PEX_ERR_ICCAD_DISR_BIT);
  314. }
  315. devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
  316. edac_dbg(3, "success\n");
  317. printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
  318. return 0;
  319. err2:
  320. edac_pci_del_device(&op->dev);
  321. err:
  322. edac_pci_free_ctl_info(pci);
  323. devres_release_group(&op->dev, mpc85xx_pci_err_probe);
  324. return res;
  325. }
  326. EXPORT_SYMBOL(mpc85xx_pci_err_probe);
  327. #endif /* CONFIG_PCI */
  328. /**************************** L2 Err device ***************************/
  329. /************************ L2 SYSFS parts ***********************************/
  330. static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
  331. *edac_dev, char *data)
  332. {
  333. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  334. return sprintf(data, "0x%08x",
  335. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
  336. }
  337. static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
  338. *edac_dev, char *data)
  339. {
  340. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  341. return sprintf(data, "0x%08x",
  342. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
  343. }
  344. static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
  345. *edac_dev, char *data)
  346. {
  347. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  348. return sprintf(data, "0x%08x",
  349. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
  350. }
  351. static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
  352. *edac_dev, const char *data,
  353. size_t count)
  354. {
  355. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  356. if (isdigit(*data)) {
  357. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
  358. simple_strtoul(data, NULL, 0));
  359. return count;
  360. }
  361. return 0;
  362. }
  363. static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
  364. *edac_dev, const char *data,
  365. size_t count)
  366. {
  367. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  368. if (isdigit(*data)) {
  369. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
  370. simple_strtoul(data, NULL, 0));
  371. return count;
  372. }
  373. return 0;
  374. }
  375. static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
  376. *edac_dev, const char *data,
  377. size_t count)
  378. {
  379. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  380. if (isdigit(*data)) {
  381. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
  382. simple_strtoul(data, NULL, 0));
  383. return count;
  384. }
  385. return 0;
  386. }
  387. static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
  388. {
  389. .attr = {
  390. .name = "inject_data_hi",
  391. .mode = (S_IRUGO | S_IWUSR)
  392. },
  393. .show = mpc85xx_l2_inject_data_hi_show,
  394. .store = mpc85xx_l2_inject_data_hi_store},
  395. {
  396. .attr = {
  397. .name = "inject_data_lo",
  398. .mode = (S_IRUGO | S_IWUSR)
  399. },
  400. .show = mpc85xx_l2_inject_data_lo_show,
  401. .store = mpc85xx_l2_inject_data_lo_store},
  402. {
  403. .attr = {
  404. .name = "inject_ctrl",
  405. .mode = (S_IRUGO | S_IWUSR)
  406. },
  407. .show = mpc85xx_l2_inject_ctrl_show,
  408. .store = mpc85xx_l2_inject_ctrl_store},
  409. /* End of list */
  410. {
  411. .attr = {.name = NULL}
  412. }
  413. };
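/*
 * Unlike the memory-controller attributes above, which are standard struct
 * device_attribute entries exposed through ATTRIBUTE_GROUPS(), the L2
 * injection knobs use the edac_device core's own sysfs_attributes hook,
 * installed by mpc85xx_set_l2_sysfs_attributes() below.
 */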
  414. static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
  415. *edac_dev)
  416. {
  417. edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
  418. }
  419. /***************************** L2 ops ***********************************/
  420. static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
  421. {
  422. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  423. u32 err_detect;
  424. err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
  425. if (!(err_detect & L2_EDE_MASK))
  426. return;
  427. printk(KERN_ERR "ECC Error in CPU L2 cache\n");
  428. printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
  429. printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
  430. in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
  431. printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
  432. in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
  433. printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
  434. in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
  435. printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
  436. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
  437. printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
  438. in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
  439. /* clear error detect register */
  440. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);
  441. if (err_detect & L2_EDE_CE_MASK)
  442. edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
  443. if (err_detect & L2_EDE_UE_MASK)
  444. edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
  445. }
  446. static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
  447. {
  448. struct edac_device_ctl_info *edac_dev = dev_id;
  449. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  450. u32 err_detect;
  451. err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
  452. if (!(err_detect & L2_EDE_MASK))
  453. return IRQ_NONE;
  454. mpc85xx_l2_check(edac_dev);
  455. return IRQ_HANDLED;
  456. }
  457. static int mpc85xx_l2_err_probe(struct platform_device *op)
  458. {
  459. struct edac_device_ctl_info *edac_dev;
  460. struct mpc85xx_l2_pdata *pdata;
  461. struct resource r;
  462. int res;
  463. if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
  464. return -ENOMEM;
  465. edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
  466. "cpu", 1, "L", 1, 2, NULL, 0,
  467. edac_dev_idx);
  468. if (!edac_dev) {
  469. devres_release_group(&op->dev, mpc85xx_l2_err_probe);
  470. return -ENOMEM;
  471. }
  472. pdata = edac_dev->pvt_info;
  473. pdata->name = "mpc85xx_l2_err";
  474. pdata->irq = NO_IRQ;
  475. edac_dev->dev = &op->dev;
  476. dev_set_drvdata(edac_dev->dev, edac_dev);
  477. edac_dev->ctl_name = pdata->name;
  478. edac_dev->dev_name = pdata->name;
  479. res = of_address_to_resource(op->dev.of_node, 0, &r);
  480. if (res) {
  481. printk(KERN_ERR "%s: Unable to get resource for "
  482. "L2 err regs\n", __func__);
  483. goto err;
  484. }
  485. /* we only need the error registers */
  486. r.start += 0xe00;
  487. if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
  488. pdata->name)) {
  489. printk(KERN_ERR "%s: Error while requesting mem region\n",
  490. __func__);
  491. res = -EBUSY;
  492. goto err;
  493. }
  494. pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
  495. if (!pdata->l2_vbase) {
  496. printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
  497. res = -ENOMEM;
  498. goto err;
  499. }
  500. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
  501. orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
  502. /* clear the err_dis */
  503. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
  504. edac_dev->mod_name = EDAC_MOD_STR;
  505. if (edac_op_state == EDAC_OPSTATE_POLL)
  506. edac_dev->edac_check = mpc85xx_l2_check;
  507. mpc85xx_set_l2_sysfs_attributes(edac_dev);
  508. pdata->edac_idx = edac_dev_idx++;
  509. if (edac_device_add_device(edac_dev) > 0) {
  510. edac_dbg(3, "failed edac_device_add_device()\n");
  511. goto err;
  512. }
  513. if (edac_op_state == EDAC_OPSTATE_INT) {
  514. pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
  515. res = devm_request_irq(&op->dev, pdata->irq,
  516. mpc85xx_l2_isr, IRQF_SHARED,
  517. "[EDAC] L2 err", edac_dev);
  518. if (res < 0) {
  519. printk(KERN_ERR
  520. "%s: Unable to request irq %d for "
  521. "MPC85xx L2 err\n", __func__, pdata->irq);
  522. irq_dispose_mapping(pdata->irq);
  523. res = -ENODEV;
  524. goto err2;
  525. }
  526. printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
  527. pdata->irq);
  528. edac_dev->op_state = OP_RUNNING_INTERRUPT;
  529. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
  530. }
  531. devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
  532. edac_dbg(3, "success\n");
  533. printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
  534. return 0;
  535. err2:
  536. edac_device_del_device(&op->dev);
  537. err:
  538. devres_release_group(&op->dev, mpc85xx_l2_err_probe);
  539. edac_device_free_ctl_info(edac_dev);
  540. return res;
  541. }
  542. static int mpc85xx_l2_err_remove(struct platform_device *op)
  543. {
  544. struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
  545. struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
  546. edac_dbg(0, "\n");
  547. if (edac_op_state == EDAC_OPSTATE_INT) {
  548. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
  549. irq_dispose_mapping(pdata->irq);
  550. }
  551. out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
  552. edac_device_del_device(&op->dev);
  553. edac_device_free_ctl_info(edac_dev);
  554. return 0;
  555. }
  556. static const struct of_device_id mpc85xx_l2_err_of_match[] = {
  557. /* deprecate the fsl,85.. forms in the future, 2.6.30? */
  558. { .compatible = "fsl,8540-l2-cache-controller", },
  559. { .compatible = "fsl,8541-l2-cache-controller", },
  560. { .compatible = "fsl,8544-l2-cache-controller", },
  561. { .compatible = "fsl,8548-l2-cache-controller", },
  562. { .compatible = "fsl,8555-l2-cache-controller", },
  563. { .compatible = "fsl,8568-l2-cache-controller", },
  564. { .compatible = "fsl,mpc8536-l2-cache-controller", },
  565. { .compatible = "fsl,mpc8540-l2-cache-controller", },
  566. { .compatible = "fsl,mpc8541-l2-cache-controller", },
  567. { .compatible = "fsl,mpc8544-l2-cache-controller", },
  568. { .compatible = "fsl,mpc8548-l2-cache-controller", },
  569. { .compatible = "fsl,mpc8555-l2-cache-controller", },
  570. { .compatible = "fsl,mpc8560-l2-cache-controller", },
  571. { .compatible = "fsl,mpc8568-l2-cache-controller", },
  572. { .compatible = "fsl,mpc8569-l2-cache-controller", },
  573. { .compatible = "fsl,mpc8572-l2-cache-controller", },
  574. { .compatible = "fsl,p1020-l2-cache-controller", },
  575. { .compatible = "fsl,p1021-l2-cache-controller", },
  576. { .compatible = "fsl,p2020-l2-cache-controller", },
  577. {},
  578. };
  579. MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
  580. static struct platform_driver mpc85xx_l2_err_driver = {
  581. .probe = mpc85xx_l2_err_probe,
  582. .remove = mpc85xx_l2_err_remove,
  583. .driver = {
  584. .name = "mpc85xx_l2_err",
  585. .of_match_table = mpc85xx_l2_err_of_match,
  586. },
  587. };
  588. /**************************** MC Err device ***************************/
  589. /*
  590. * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
  591. * MPC8572 User's Manual. Each line represents a syndrome bit column as a
  592. * 64-bit value, but split into an upper and lower 32-bit chunk. The labels
  593. * below correspond to Freescale's manuals.
  594. */
  595. static unsigned int ecc_table[16] = {
  596. /* MSB LSB */
  597. /* [0:31] [32:63] */
  598. 0xf00fe11e, 0xc33c0ff7, /* Syndrome bit 7 */
  599. 0x00ff00ff, 0x00fff0ff,
  600. 0x0f0f0f0f, 0x0f0fff00,
  601. 0x11113333, 0x7777000f,
  602. 0x22224444, 0x8888222f,
  603. 0x44448888, 0xffff4441,
  604. 0x8888ffff, 0x11118882,
  605. 0xffff1111, 0x22221114, /* Syndrome bit 0 */
  606. };
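/*
 * The table is consumed in pairs: ecc_table[2k] and ecc_table[2k + 1] form
 * the 64-bit mask (upper word first) of data bits that participate in check
 * bit k as computed by calculate_ecc() below, where each check bit is simply
 * the parity (XOR) of the masked data bits.
 */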
  607. /*
  608. * Calculate the correct ECC value for a 64-bit value specified by high:low
  609. */
  610. static u8 calculate_ecc(u32 high, u32 low)
  611. {
  612. u32 mask_low;
  613. u32 mask_high;
  614. int bit_cnt;
  615. u8 ecc = 0;
  616. int i;
  617. int j;
  618. for (i = 0; i < 8; i++) {
  619. mask_high = ecc_table[i * 2];
  620. mask_low = ecc_table[i * 2 + 1];
  621. bit_cnt = 0;
  622. for (j = 0; j < 32; j++) {
  623. if ((mask_high >> j) & 1)
  624. bit_cnt ^= (high >> j) & 1;
  625. if ((mask_low >> j) & 1)
  626. bit_cnt ^= (low >> j) & 1;
  627. }
  628. ecc |= bit_cnt << i;
  629. }
  630. return ecc;
  631. }
  632. /*
  633. * Create the syndrome code which is generated if the data line specified by
  634. * 'bit' failed, e.g. generate one of the 8-bit codes seen in Table 8-55 of
  635. * the MPC8641 User's Manual or Table 9-61 of the MPC8572 User's Manual.
  636. */
  637. static u8 syndrome_from_bit(unsigned int bit) {
  638. int i;
  639. u8 syndrome = 0;
  640. /*
  641. * Cycle through the upper or lower 32-bit portion of each value in
  642. * ecc_table depending on if 'bit' is in the upper or lower half of
  643. * 64-bit data.
  644. */
  645. for (i = bit < 32; i < 16; i += 2)
  646. syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
  647. return syndrome;
  648. }
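/*
 * Note on the loop above: 'i = bit < 32' starts the scan at index 1 (the
 * [32:63] masks) when 'bit' addresses the low 32 data bits and at index 0
 * (the [0:31] masks) otherwise; stepping by two stays within that half, and
 * table pair i contributes bit i/2 of the resulting syndrome.
 */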
  649. /*
  650. * Decode data and ecc syndrome to determine what went wrong
  651. * Note: This can only decode single-bit errors
  652. */
  653. static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
  654. int *bad_data_bit, int *bad_ecc_bit)
  655. {
  656. int i;
  657. u8 syndrome;
  658. *bad_data_bit = -1;
  659. *bad_ecc_bit = -1;
  660. /*
  661. * Calculate the ECC of the captured data and XOR it with the captured
  662. * ECC to find an ECC syndrome value we can search for
  663. */
  664. syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
  665. /* Check if a data line is stuck... */
  666. for (i = 0; i < 64; i++) {
  667. if (syndrome == syndrome_from_bit(i)) {
  668. *bad_data_bit = i;
  669. return;
  670. }
  671. }
  672. /* If data is correct, check ECC bits for errors... */
  673. for (i = 0; i < 8; i++) {
  674. if ((syndrome >> i) & 0x1) {
  675. *bad_ecc_bit = i;
  676. return;
  677. }
  678. }
  679. }
  680. static void mpc85xx_mc_check(struct mem_ctl_info *mci)
  681. {
  682. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  683. struct csrow_info *csrow;
  684. u32 bus_width;
  685. u32 err_detect;
  686. u32 syndrome;
  687. u32 err_addr;
  688. u32 pfn;
  689. int row_index;
  690. u32 cap_high;
  691. u32 cap_low;
  692. int bad_data_bit;
  693. int bad_ecc_bit;
  694. err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
  695. if (!err_detect)
  696. return;
  697. mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
  698. err_detect);
  699. /* no more processing if not ECC bit errors */
  700. if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
  701. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
  702. return;
  703. }
  704. syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
  705. /* Mask off appropriate bits of syndrome based on bus width */
  706. bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
  707. DSC_DBW_MASK) ? 32 : 64;
  708. if (bus_width == 64)
  709. syndrome &= 0xff;
  710. else
  711. syndrome &= 0xffff;
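/*
 * Presumably the controller computes ECC independently over each 32-bit
 * beat when the bus is in 32-bit mode, which is why 16 syndrome bits are
 * kept there versus 8 in 64-bit mode; the single-bit decode below only
 * handles the 64-bit layout.
 */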
  712. err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
  713. pfn = err_addr >> PAGE_SHIFT;
  714. for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
  715. csrow = mci->csrows[row_index];
  716. if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
  717. break;
  718. }
  719. cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
  720. cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);
  721. /*
  722. * Analyze single-bit errors on 64-bit wide buses
  723. * TODO: Add support for 32-bit wide buses
  724. */
  725. if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
  726. sbe_ecc_decode(cap_high, cap_low, syndrome,
  727. &bad_data_bit, &bad_ecc_bit);
  728. if (bad_data_bit != -1)
  729. mpc85xx_mc_printk(mci, KERN_ERR,
  730. "Faulty Data bit: %d\n", bad_data_bit);
  731. if (bad_ecc_bit != -1)
  732. mpc85xx_mc_printk(mci, KERN_ERR,
  733. "Faulty ECC bit: %d\n", bad_ecc_bit);
  734. mpc85xx_mc_printk(mci, KERN_ERR,
  735. "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
  736. cap_high ^ (1 << (bad_data_bit - 32)),
  737. cap_low ^ (1 << bad_data_bit),
  738. syndrome ^ (1 << bad_ecc_bit));
  739. }
  740. mpc85xx_mc_printk(mci, KERN_ERR,
  741. "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
  742. cap_high, cap_low, syndrome);
  743. mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr);
  744. mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
  745. /* we are out of range */
  746. if (row_index == mci->nr_csrows)
  747. mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
  748. if (err_detect & DDR_EDE_SBE)
  749. edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
  750. pfn, err_addr & ~PAGE_MASK, syndrome,
  751. row_index, 0, -1,
  752. mci->ctl_name, "");
  753. if (err_detect & DDR_EDE_MBE)
  754. edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
  755. pfn, err_addr & ~PAGE_MASK, syndrome,
  756. row_index, 0, -1,
  757. mci->ctl_name, "");
  758. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
  759. }
  760. static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
  761. {
  762. struct mem_ctl_info *mci = dev_id;
  763. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  764. u32 err_detect;
  765. err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
  766. if (!err_detect)
  767. return IRQ_NONE;
  768. mpc85xx_mc_check(mci);
  769. return IRQ_HANDLED;
  770. }
  771. static void mpc85xx_init_csrows(struct mem_ctl_info *mci)
  772. {
  773. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  774. struct csrow_info *csrow;
  775. struct dimm_info *dimm;
  776. u32 sdram_ctl;
  777. u32 sdtype;
  778. enum mem_type mtype;
  779. u32 cs_bnds;
  780. int index;
  781. sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
  782. sdtype = sdram_ctl & DSC_SDTYPE_MASK;
  783. if (sdram_ctl & DSC_RD_EN) {
  784. switch (sdtype) {
  785. case DSC_SDTYPE_DDR:
  786. mtype = MEM_RDDR;
  787. break;
  788. case DSC_SDTYPE_DDR2:
  789. mtype = MEM_RDDR2;
  790. break;
  791. case DSC_SDTYPE_DDR3:
  792. mtype = MEM_RDDR3;
  793. break;
  794. default:
  795. mtype = MEM_UNKNOWN;
  796. break;
  797. }
  798. } else {
  799. switch (sdtype) {
  800. case DSC_SDTYPE_DDR:
  801. mtype = MEM_DDR;
  802. break;
  803. case DSC_SDTYPE_DDR2:
  804. mtype = MEM_DDR2;
  805. break;
  806. case DSC_SDTYPE_DDR3:
  807. mtype = MEM_DDR3;
  808. break;
  809. default:
  810. mtype = MEM_UNKNOWN;
  811. break;
  812. }
  813. }
  814. for (index = 0; index < mci->nr_csrows; index++) {
  815. u32 start;
  816. u32 end;
  817. csrow = mci->csrows[index];
  818. dimm = csrow->channels[0]->dimm;
  819. cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
  820. (index * MPC85XX_MC_CS_BNDS_OFS));
  821. start = (cs_bnds & 0xffff0000) >> 16;
  822. end = (cs_bnds & 0x0000ffff);
  823. if (start == end)
  824. continue; /* not populated */
  825. start <<= (24 - PAGE_SHIFT);
  826. end <<= (24 - PAGE_SHIFT);
  827. end |= (1 << (24 - PAGE_SHIFT)) - 1;
  828. csrow->first_page = start;
  829. csrow->last_page = end;
  830. dimm->nr_pages = end + 1 - start;
  831. dimm->grain = 8;
  832. dimm->mtype = mtype;
  833. dimm->dtype = DEV_UNKNOWN;
  834. if (sdram_ctl & DSC_X32_EN)
  835. dimm->dtype = DEV_X32;
  836. dimm->edac_mode = EDAC_SECDED;
  837. }
  838. }
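/*
 * CS_BNDSn decoding above: the driver takes the upper and lower halfwords of
 * each bounds register as the start and end of the chip select in 16 MiB
 * units (hence the shift by 24 - PAGE_SHIFT to convert to page numbers), and
 * a chip select whose start equals its end is skipped as unpopulated.
 */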
  839. static int mpc85xx_mc_err_probe(struct platform_device *op)
  840. {
  841. struct mem_ctl_info *mci;
  842. struct edac_mc_layer layers[2];
  843. struct mpc85xx_mc_pdata *pdata;
  844. struct resource r;
  845. u32 sdram_ctl;
  846. int res;
  847. if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
  848. return -ENOMEM;
  849. layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
  850. layers[0].size = 4;
  851. layers[0].is_virt_csrow = true;
  852. layers[1].type = EDAC_MC_LAYER_CHANNEL;
  853. layers[1].size = 1;
  854. layers[1].is_virt_csrow = false;
  855. mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
  856. sizeof(*pdata));
  857. if (!mci) {
  858. devres_release_group(&op->dev, mpc85xx_mc_err_probe);
  859. return -ENOMEM;
  860. }
  861. pdata = mci->pvt_info;
  862. pdata->name = "mpc85xx_mc_err";
  863. pdata->irq = NO_IRQ;
  864. mci->pdev = &op->dev;
  865. pdata->edac_idx = edac_mc_idx++;
  866. dev_set_drvdata(mci->pdev, mci);
  867. mci->ctl_name = pdata->name;
  868. mci->dev_name = pdata->name;
  869. res = of_address_to_resource(op->dev.of_node, 0, &r);
  870. if (res) {
  871. printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
  872. __func__);
  873. goto err;
  874. }
  875. if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
  876. pdata->name)) {
  877. printk(KERN_ERR "%s: Error while requesting mem region\n",
  878. __func__);
  879. res = -EBUSY;
  880. goto err;
  881. }
  882. pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
  883. if (!pdata->mc_vbase) {
  884. printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
  885. res = -ENOMEM;
  886. goto err;
  887. }
  888. sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
  889. if (!(sdram_ctl & DSC_ECC_EN)) {
  890. /* no ECC */
  891. printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
  892. res = -ENODEV;
  893. goto err;
  894. }
  895. edac_dbg(3, "init mci\n");
  896. mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
  897. MEM_FLAG_DDR | MEM_FLAG_DDR2;
  898. mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
  899. mci->edac_cap = EDAC_FLAG_SECDED;
  900. mci->mod_name = EDAC_MOD_STR;
  901. mci->mod_ver = MPC85XX_REVISION;
  902. if (edac_op_state == EDAC_OPSTATE_POLL)
  903. mci->edac_check = mpc85xx_mc_check;
  904. mci->ctl_page_to_phys = NULL;
  905. mci->scrub_mode = SCRUB_SW_SRC;
  906. mpc85xx_init_csrows(mci);
  907. /* store the original error disable bits */
  908. orig_ddr_err_disable =
  909. in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
  910. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
  911. /* clear all error bits */
  912. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
  913. if (edac_mc_add_mc_with_groups(mci, mpc85xx_dev_groups)) {
  914. edac_dbg(3, "failed edac_mc_add_mc()\n");
  915. goto err;
  916. }
  917. if (edac_op_state == EDAC_OPSTATE_INT) {
  918. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
  919. DDR_EIE_MBEE | DDR_EIE_SBEE);
  920. /* store the original error management threshold */
  921. orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
  922. MPC85XX_MC_ERR_SBE) & 0xff0000;
  923. /* set threshold to 1 error per interrupt */
  924. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
  925. /* register interrupts */
  926. pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
  927. res = devm_request_irq(&op->dev, pdata->irq,
  928. mpc85xx_mc_isr,
  929. IRQF_SHARED,
  930. "[EDAC] MC err", mci);
  931. if (res < 0) {
  932. printk(KERN_ERR "%s: Unable to request irq %d for "
  933. "MPC85xx DRAM ERR\n", __func__, pdata->irq);
  934. irq_dispose_mapping(pdata->irq);
  935. res = -ENODEV;
  936. goto err2;
  937. }
  938. printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
  939. pdata->irq);
  940. }
  941. devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
  942. edac_dbg(3, "success\n");
  943. printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
  944. return 0;
  945. err2:
  946. edac_mc_del_mc(&op->dev);
  947. err:
  948. devres_release_group(&op->dev, mpc85xx_mc_err_probe);
  949. edac_mc_free(mci);
  950. return res;
  951. }
  952. static int mpc85xx_mc_err_remove(struct platform_device *op)
  953. {
  954. struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
  955. struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
  956. edac_dbg(0, "\n");
  957. if (edac_op_state == EDAC_OPSTATE_INT) {
  958. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
  959. irq_dispose_mapping(pdata->irq);
  960. }
  961. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
  962. orig_ddr_err_disable);
  963. out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
  964. edac_mc_del_mc(&op->dev);
  965. edac_mc_free(mci);
  966. return 0;
  967. }
  968. static const struct of_device_id mpc85xx_mc_err_of_match[] = {
  969. /* deprecate the fsl,85.. forms in the future, 2.6.30? */
  970. { .compatible = "fsl,8540-memory-controller", },
  971. { .compatible = "fsl,8541-memory-controller", },
  972. { .compatible = "fsl,8544-memory-controller", },
  973. { .compatible = "fsl,8548-memory-controller", },
  974. { .compatible = "fsl,8555-memory-controller", },
  975. { .compatible = "fsl,8568-memory-controller", },
  976. { .compatible = "fsl,mpc8536-memory-controller", },
  977. { .compatible = "fsl,mpc8540-memory-controller", },
  978. { .compatible = "fsl,mpc8541-memory-controller", },
  979. { .compatible = "fsl,mpc8544-memory-controller", },
  980. { .compatible = "fsl,mpc8548-memory-controller", },
  981. { .compatible = "fsl,mpc8555-memory-controller", },
  982. { .compatible = "fsl,mpc8560-memory-controller", },
  983. { .compatible = "fsl,mpc8568-memory-controller", },
  984. { .compatible = "fsl,mpc8569-memory-controller", },
  985. { .compatible = "fsl,mpc8572-memory-controller", },
  986. { .compatible = "fsl,mpc8349-memory-controller", },
  987. { .compatible = "fsl,p1020-memory-controller", },
  988. { .compatible = "fsl,p1021-memory-controller", },
  989. { .compatible = "fsl,p2020-memory-controller", },
  990. { .compatible = "fsl,qoriq-memory-controller", },
  991. {},
  992. };
  993. MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
  994. static struct platform_driver mpc85xx_mc_err_driver = {
  995. .probe = mpc85xx_mc_err_probe,
  996. .remove = mpc85xx_mc_err_remove,
  997. .driver = {
  998. .name = "mpc85xx_mc_err",
  999. .of_match_table = mpc85xx_mc_err_of_match,
  1000. },
  1001. };
  1002. #ifdef CONFIG_FSL_SOC_BOOKE
  1003. static void __init mpc85xx_mc_clear_rfxe(void *data)
  1004. {
  1005. orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
  1006. mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
  1007. }
  1008. #endif
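/*
 * mpc85xx_mc_clear_rfxe() above clears HID1[RFXE] (read fault exception
 * enable on e500 cores, per Freescale's core documentation) because with
 * RFXE set a bus read fault, such as one caused by an uncorrectable ECC
 * error, escalates into a machine check; with it cleared the error surfaces
 * through the controller's error-detect registers so the EDAC interrupt
 * handlers can report it. mpc85xx_mc_init() runs it on each CPU when the
 * driver operates in interrupt mode.
 */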
  1009. static int __init mpc85xx_mc_init(void)
  1010. {
  1011. int res = 0;
  1012. u32 pvr = 0;
  1013. printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
  1014. "(C) 2006 Montavista Software\n");
  1015. /* make sure error reporting method is sane */
  1016. switch (edac_op_state) {
  1017. case EDAC_OPSTATE_POLL:
  1018. case EDAC_OPSTATE_INT:
  1019. break;
  1020. default:
  1021. edac_op_state = EDAC_OPSTATE_INT;
  1022. break;
  1023. }
  1024. res = platform_driver_register(&mpc85xx_mc_err_driver);
  1025. if (res)
  1026. printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");
  1027. res = platform_driver_register(&mpc85xx_l2_err_driver);
  1028. if (res)
  1029. printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
  1030. #ifdef CONFIG_FSL_SOC_BOOKE
  1031. pvr = mfspr(SPRN_PVR);
  1032. if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
  1033. (PVR_VER(pvr) == PVR_VER_E500V2)) {
  1034. /*
  1035. * need to clear HID1[RFXE] to disable machine check int
  1036. * so we can catch it
  1037. */
  1038. if (edac_op_state == EDAC_OPSTATE_INT)
  1039. on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
  1040. }
  1041. #endif
  1042. return 0;
  1043. }
  1044. module_init(mpc85xx_mc_init);
  1045. #ifdef CONFIG_FSL_SOC_BOOKE
  1046. static void __exit mpc85xx_mc_restore_hid1(void *data)
  1047. {
  1048. mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
  1049. }
  1050. #endif
  1051. static void __exit mpc85xx_mc_exit(void)
  1052. {
  1053. #ifdef CONFIG_FSL_SOC_BOOKE
  1054. u32 pvr = mfspr(SPRN_PVR);
  1055. if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
  1056. (PVR_VER(pvr) == PVR_VER_E500V2)) {
  1057. on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
  1058. }
  1059. #endif
  1060. platform_driver_unregister(&mpc85xx_l2_err_driver);
  1061. platform_driver_unregister(&mpc85xx_mc_err_driver);
  1062. }
  1063. module_exit(mpc85xx_mc_exit);
  1064. MODULE_LICENSE("GPL");
  1065. MODULE_AUTHOR("Montavista Software, Inc.");
  1066. module_param(edac_op_state, int, 0444);
  1067. MODULE_PARM_DESC(edac_op_state,
  1068. "EDAC Error Reporting state: 0=Poll, 2=Interrupt");