mpc85xx_edac.c

/*
 * Freescale MPC85xx Memory Controller kernel module
 *
 * Parts Copyright (c) 2013 by Freescale Semiconductor, Inc.
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>

#include <linux/of_platform.h>
#include <linux/of_device.h>
#include "edac_module.h"
#include "edac_core.h"
#include "mpc85xx_edac.h"
static int edac_dev_idx;
#ifdef CONFIG_PCI
static int edac_pci_idx;
#endif
static int edac_mc_idx;

static u32 orig_ddr_err_disable;
static u32 orig_ddr_err_sbe;

/*
 * PCI Err defines
 */
#ifdef CONFIG_PCI
static u32 orig_pci_err_cap_dr;
static u32 orig_pci_err_en;
#endif

static u32 orig_l2_err_disable;
#ifdef CONFIG_FSL_SOC_BOOKE
static u32 orig_hid1[2];
#endif
/************************ MC SYSFS parts ***********************************/

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
					      struct device_attribute *mattr,
					      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->mc_vbase +
			       MPC85XX_MC_DATA_ERR_INJECT_HI));
}

static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
					      struct device_attribute *mattr,
					      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->mc_vbase +
			       MPC85XX_MC_DATA_ERR_INJECT_LO));
}

static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
					   struct device_attribute *mattr,
					   char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
}

static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
					       struct device_attribute *mattr,
					       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
					       struct device_attribute *mattr,
					       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
					    struct device_attribute *mattr,
					    const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
	    mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);
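
/*
 * The inject_data_hi, inject_data_lo and inject_ctrl attributes above map
 * straight onto the controller's DATA_ERR_INJECT_HI/LO and ECC_ERR_INJECT
 * registers.  Written values must start with a digit and are parsed with
 * simple_strtoul(); consult the MPC85xx DDR controller reference manual for
 * the exact bit layout the hardware expects when injecting errors.
 */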
static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci)
{
	int rc;

	rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi);
	if (rc < 0)
		return rc;
	rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo);
	if (rc < 0)
		return rc;
	rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl);
	if (rc < 0)
		return rc;

	return 0;
}

static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci)
{
	device_remove_file(&mci->dev, &dev_attr_inject_data_hi);
	device_remove_file(&mci->dev, &dev_attr_inject_data_lo);
	device_remove_file(&mci->dev, &dev_attr_inject_ctrl);
}
/**************************** PCI Err device ***************************/

#ifdef CONFIG_PCI
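/*
 * Read the PCI error detect register, dump the capture registers, and hand
 * parity and non-parity errors to the EDAC PCI core.  A master abort seen
 * without any other error bit is expected during config cycles and is
 * silently cleared.
 */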
static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
{
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	/* master aborts can happen during PCI config cycles */
	if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
		return;
	}

	printk(KERN_ERR "PCI error(s) detected\n");
	printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);

	printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
	printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
	printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
	printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
	printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);

	if (err_detect & PCI_EDE_PERR_MASK)
		edac_pci_handle_pe(pci, pci->ctl_name);

	if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
		edac_pci_handle_npe(pci, pci->ctl_name);
}
static void mpc85xx_pcie_check(struct edac_pci_ctl_info *pci)
{
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	pr_err("PCIe error(s) detected\n");
	pr_err("PCIe ERR_DR register: 0x%08x\n", err_detect);
	pr_err("PCIe ERR_CAP_STAT register: 0x%08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR));
	pr_err("PCIe ERR_CAP_R0 register: 0x%08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R0));
	pr_err("PCIe ERR_CAP_R1 register: 0x%08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R1));
	pr_err("PCIe ERR_CAP_R2 register: 0x%08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R2));
	pr_err("PCIe ERR_CAP_R3 register: 0x%08x\n",
	       in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R3));

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
}
static int mpc85xx_pcie_find_capability(struct device_node *np)
{
	struct pci_controller *hose;

	if (!np)
		return -EINVAL;

	hose = pci_find_hose_for_OF_device(np);

	return early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
}

static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
{
	struct edac_pci_ctl_info *pci = dev_id;
	struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);

	if (!err_detect)
		return IRQ_NONE;

	if (pdata->is_pcie)
		mpc85xx_pcie_check(pci);
	else
		mpc85xx_pci_check(pci);

	return IRQ_HANDLED;
}
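
/*
 * Probe one PCI/PCIe error-reporting block: map the error registers
 * (offset 0xe00 into the controller's register space), save the original
 * capture/enable settings, register an edac_pci control structure and,
 * when running in interrupt mode, hook the error interrupt.
 */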
int mpc85xx_pci_err_probe(struct platform_device *op)
{
	struct edac_pci_ctl_info *pci;
	struct mpc85xx_pci_pdata *pdata;
	struct resource r;
	int res = 0;

	if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
		return -ENOMEM;

	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
	if (!pci)
		return -ENOMEM;

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	pdata = pci->pvt_info;
	pdata->name = "mpc85xx_pci_err";
	pdata->irq = NO_IRQ;

	if (mpc85xx_pcie_find_capability(op->dev.of_node) > 0)
		pdata->is_pcie = true;

	dev_set_drvdata(&op->dev, pci);
	pci->dev = &op->dev;
	pci->mod_name = EDAC_MOD_STR;
	pci->ctl_name = pdata->name;
	pci->dev_name = dev_name(&op->dev);

	if (edac_op_state == EDAC_OPSTATE_POLL) {
		if (pdata->is_pcie)
			pci->edac_check = mpc85xx_pcie_check;
		else
			pci->edac_check = mpc85xx_pci_check;
	}

	pdata->edac_idx = edac_pci_idx++;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		goto err;
	}

	/* we only need the error registers */
	r.start += 0xe00;

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->pci_vbase) {
		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	if (pdata->is_pcie) {
		orig_pci_err_cap_dr =
		    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR);
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, ~0);
		orig_pci_err_en =
		    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, 0);
	} else {
		orig_pci_err_cap_dr =
		    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);

		/* PCI master abort is expected during config cycles */
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);

		orig_pci_err_en =
		    in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);

		/* disable master abort reporting */
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
	}

	/* clear error bits */
	out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);

	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
		edac_dbg(3, "failed edac_pci_add_device()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       mpc85xx_pci_isr,
				       IRQF_SHARED,
				       "[EDAC] PCI err", pci);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MPC85xx PCI err\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
		       pdata->irq);
	}

	if (pdata->is_pcie) {
		/*
		 * Enable all PCIe error interrupts and error detection,
		 * except for the "invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA
		 * access" interrupt-enable and detect-enable bits.  During
		 * boot the PCIe bus code probes devices with config accesses
		 * that the hardware flags as invalid, which would otherwise
		 * flood the log with notices, so that particular detection
		 * is left disabled.
		 */
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0
			 & ~PEX_ERR_ICCAIE_EN_BIT);
		out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, 0
			 | PEX_ERR_ICCAD_DISR_BIT);
	}

	devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
	edac_dbg(3, "success\n");
	printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");

	return 0;

err2:
	edac_pci_del_device(&op->dev);
err:
	edac_pci_free_ctl_info(pci);
	devres_release_group(&op->dev, mpc85xx_pci_err_probe);
	return res;
}
EXPORT_SYMBOL(mpc85xx_pci_err_probe);

#endif				/* CONFIG_PCI */
/**************************** L2 Err device ***************************/

/************************ L2 SYSFS parts ***********************************/
static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
					      *edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
}

static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
					      *edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
}

static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
					   *edac_dev, char *data)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	return sprintf(data, "0x%08x",
		       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
}

static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
					       *edac_dev, const char *data,
					       size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
					       *edac_dev, const char *data,
					       size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}

static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
					    *edac_dev, const char *data,
					    size_t count)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	if (isdigit(*data)) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
			 simple_strtoul(data, NULL, 0));
		return count;
	}
	return 0;
}
static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
	{
	 .attr = {
		  .name = "inject_data_hi",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_l2_inject_data_hi_show,
	 .store = mpc85xx_l2_inject_data_hi_store},
	{
	 .attr = {
		  .name = "inject_data_lo",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_l2_inject_data_lo_show,
	 .store = mpc85xx_l2_inject_data_lo_store},
	{
	 .attr = {
		  .name = "inject_ctrl",
		  .mode = (S_IRUGO | S_IWUSR)
		  },
	 .show = mpc85xx_l2_inject_ctrl_show,
	 .store = mpc85xx_l2_inject_ctrl_store},

	/* End of list */
	{
	 .attr = {.name = NULL}
	 }
};

static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
					    *edac_dev)
{
	edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
}
/***************************** L2 ops ***********************************/

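/*
 * Poll/interrupt handler body for L2 ECC: read the L2 error detect
 * register, dump the capture registers, clear the logged bits and report
 * correctable/uncorrectable events to the EDAC device core.
 */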
static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
{
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);

	if (!(err_detect & L2_EDE_MASK))
		return;

	printk(KERN_ERR "ECC Error in CPU L2 cache\n");
	printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
	printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
	printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
	printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
	printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
	printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
	       in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));

	/* clear error detect register */
	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);

	if (err_detect & L2_EDE_CE_MASK)
		edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);

	if (err_detect & L2_EDE_UE_MASK)
		edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);

	if (!(err_detect & L2_EDE_MASK))
		return IRQ_NONE;

	mpc85xx_l2_check(edac_dev);

	return IRQ_HANDLED;
}
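
/*
 * Probe the L2 cache controller's error-reporting block: map the error
 * registers at offset 0xe00, clear and un-mask error detection, register
 * an edac_device and, in interrupt mode, enable the L2 error interrupts.
 */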
static int mpc85xx_l2_err_probe(struct platform_device *op)
{
	struct edac_device_ctl_info *edac_dev;
	struct mpc85xx_l2_pdata *pdata;
	struct resource r;
	int res;

	if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "cpu", 1, "L", 1, 2, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&op->dev, mpc85xx_l2_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mpc85xx_l2_err";
	pdata->irq = NO_IRQ;
	edac_dev->dev = &op->dev;
	dev_set_drvdata(edac_dev->dev, edac_dev);
	edac_dev->ctl_name = pdata->name;
	edac_dev->dev_name = pdata->name;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "L2 err regs\n", __func__);
		goto err;
	}

	/* we only need the error registers */
	r.start += 0xe00;

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->l2_vbase) {
		printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);

	orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);

	/* clear the err_dis */
	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);

	edac_dev->mod_name = EDAC_MOD_STR;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mpc85xx_l2_check;

	mpc85xx_set_l2_sysfs_attributes(edac_dev);

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       mpc85xx_l2_isr, IRQF_SHARED,
				       "[EDAC] L2 err", edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MPC85xx L2 err\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
		       pdata->irq);

		edac_dev->op_state = OP_RUNNING_INTERRUPT;

		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
	}

	devres_remove_group(&op->dev, mpc85xx_l2_err_probe);

	edac_dbg(3, "success\n");
	printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");

	return 0;

err2:
	edac_device_del_device(&op->dev);
err:
	devres_release_group(&op->dev, mpc85xx_l2_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}
static int mpc85xx_l2_err_remove(struct platform_device *op)
{
	struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
	struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;

	edac_dbg(0, "\n");

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
	edac_device_del_device(&op->dev);
	edac_device_free_ctl_info(edac_dev);
	return 0;
}

static struct of_device_id mpc85xx_l2_err_of_match[] = {
	/* deprecate the fsl,85.. forms in the future, 2.6.30? */
	{ .compatible = "fsl,8540-l2-cache-controller", },
	{ .compatible = "fsl,8541-l2-cache-controller", },
	{ .compatible = "fsl,8544-l2-cache-controller", },
	{ .compatible = "fsl,8548-l2-cache-controller", },
	{ .compatible = "fsl,8555-l2-cache-controller", },
	{ .compatible = "fsl,8568-l2-cache-controller", },
	{ .compatible = "fsl,mpc8536-l2-cache-controller", },
	{ .compatible = "fsl,mpc8540-l2-cache-controller", },
	{ .compatible = "fsl,mpc8541-l2-cache-controller", },
	{ .compatible = "fsl,mpc8544-l2-cache-controller", },
	{ .compatible = "fsl,mpc8548-l2-cache-controller", },
	{ .compatible = "fsl,mpc8555-l2-cache-controller", },
	{ .compatible = "fsl,mpc8560-l2-cache-controller", },
	{ .compatible = "fsl,mpc8568-l2-cache-controller", },
	{ .compatible = "fsl,mpc8569-l2-cache-controller", },
	{ .compatible = "fsl,mpc8572-l2-cache-controller", },
	{ .compatible = "fsl,p1020-l2-cache-controller", },
	{ .compatible = "fsl,p1021-l2-cache-controller", },
	{ .compatible = "fsl,p2020-l2-cache-controller", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);

static struct platform_driver mpc85xx_l2_err_driver = {
	.probe = mpc85xx_l2_err_probe,
	.remove = mpc85xx_l2_err_remove,
	.driver = {
		   .name = "mpc85xx_l2_err",
		   .of_match_table = mpc85xx_l2_err_of_match,
		   },
};
/**************************** MC Err device ***************************/

/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
 * below correspond to Freescale's manuals.
 */
static unsigned int ecc_table[16] = {
	/* MSB           LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
};
/*
 * Calculate the correct ECC value for a 64-bit value specified by high:low
 */
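/*
 * Each pass of the outer loop below computes one ECC check bit as the XOR
 * (parity) of the data bits selected by the corresponding mask pair in
 * ecc_table[].
 */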
static u8 calculate_ecc(u32 high, u32 low)
{
	u32 mask_low;
	u32 mask_high;
	int bit_cnt;
	u8 ecc = 0;
	int i;
	int j;

	for (i = 0; i < 8; i++) {
		mask_high = ecc_table[i * 2];
		mask_low = ecc_table[i * 2 + 1];
		bit_cnt = 0;

		for (j = 0; j < 32; j++) {
			if ((mask_high >> j) & 1)
				bit_cnt ^= (high >> j) & 1;
			if ((mask_low >> j) & 1)
				bit_cnt ^= (low >> j) & 1;
		}

		ecc |= bit_cnt << i;
	}

	return ecc;
}
/*
 * Create the syndrome code which is generated if the data line specified by
 * 'bit' failed, e.g. generate one of the 8-bit codes seen in Table 8-55 of
 * the MPC8641 User's Manual and 9-61 of the MPC8572 User's Manual.
 */
static u8 syndrome_from_bit(unsigned int bit)
{
	int i;
	u8 syndrome = 0;

	/*
	 * Cycle through the upper or lower 32-bit portion of each value in
	 * ecc_table depending on if 'bit' is in the upper or lower half of
	 * 64-bit data.
	 */
	for (i = bit < 32; i < 16; i += 2)
		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);

	return syndrome;
}
/*
 * Decode data and ecc syndrome to determine what went wrong
 * Note: This can only decode single-bit errors
 */
static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
			   int *bad_data_bit, int *bad_ecc_bit)
{
	int i;
	u8 syndrome;

	*bad_data_bit = -1;
	*bad_ecc_bit = -1;

	/*
	 * Calculate the ECC of the captured data and XOR it with the
	 * captured ECC to find an ECC syndrome value we can search for
	 */
	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;

	/* Check if a data line is stuck... */
	for (i = 0; i < 64; i++) {
		if (syndrome == syndrome_from_bit(i)) {
			*bad_data_bit = i;
			return;
		}
	}

	/* If data is correct, check ECC bits for errors... */
	for (i = 0; i < 8; i++) {
		if ((syndrome >> i) & 0x1) {
			*bad_ecc_bit = i;
			return;
		}
	}
}
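
/*
 * Main memory controller error handler: read ERR_DETECT, mask the captured
 * ECC syndrome according to the bus width, locate the affected chip-select
 * row from the captured address, decode single-bit errors on 64-bit buses
 * and report CE/UE events to the EDAC MC core before clearing ERR_DETECT.
 */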
static void mpc85xx_mc_check(struct mem_ctl_info *mci)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	u32 bus_width;
	u32 err_detect;
	u32 syndrome;
	u32 err_addr;
	u32 pfn;
	int row_index;
	u32 cap_high;
	u32 cap_low;
	int bad_data_bit;
	int bad_ecc_bit;

	err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
	if (!err_detect)
		return;

	mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
			  err_detect);

	/* no more processing if not ECC bit errors */
	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
		return;
	}

	syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);

	/* Mask off appropriate bits of syndrome based on bus width */
	bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
		     DSC_DBW_MASK) ? 32 : 64;
	if (bus_width == 64)
		syndrome &= 0xff;
	else
		syndrome &= 0xffff;

	err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
	pfn = err_addr >> PAGE_SHIFT;

	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
		csrow = mci->csrows[row_index];
		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
			break;
	}

	cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
	cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);

	/*
	 * Analyze single-bit errors on 64-bit wide buses
	 * TODO: Add support for 32-bit wide buses
	 */
	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
		sbe_ecc_decode(cap_high, cap_low, syndrome,
			       &bad_data_bit, &bad_ecc_bit);

		if (bad_data_bit != -1)
			mpc85xx_mc_printk(mci, KERN_ERR,
					  "Faulty Data bit: %d\n", bad_data_bit);
		if (bad_ecc_bit != -1)
			mpc85xx_mc_printk(mci, KERN_ERR,
					  "Faulty ECC bit: %d\n", bad_ecc_bit);

		mpc85xx_mc_printk(mci, KERN_ERR,
				  "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
				  cap_high ^ (1 << (bad_data_bit - 32)),
				  cap_low ^ (1 << bad_data_bit),
				  syndrome ^ (1 << bad_ecc_bit));
	}

	mpc85xx_mc_printk(mci, KERN_ERR,
			  "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			  cap_high, cap_low, syndrome);
	mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr);
	mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);

	/* we are out of range */
	if (row_index == mci->nr_csrows)
		mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");

	if (err_detect & DDR_EDE_SBE)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	if (err_detect & DDR_EDE_MBE)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
}
static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	u32 err_detect;

	err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
	if (!err_detect)
		return IRQ_NONE;

	mpc85xx_mc_check(mci);

	return IRQ_HANDLED;
}
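
/*
 * Walk the chip-select bounds registers (CS_BNDS) to fill in the EDAC
 * csrow/dimm layout: a row whose start and end bounds are equal is treated
 * as unpopulated, and the memory type is derived from the DDR_SDRAM_CFG
 * register (registered vs. unbuffered DDR/DDR2/DDR3).
 */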
static void mpc85xx_init_csrows(struct mem_ctl_info *mci)
{
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	u32 sdram_ctl;
	u32 sdtype;
	enum mem_type mtype;
	u32 cs_bnds;
	int index;

	sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);

	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
	if (sdram_ctl & DSC_RD_EN) {
		switch (sdtype) {
		case DSC_SDTYPE_DDR:
			mtype = MEM_RDDR;
			break;
		case DSC_SDTYPE_DDR2:
			mtype = MEM_RDDR2;
			break;
		case DSC_SDTYPE_DDR3:
			mtype = MEM_RDDR3;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	} else {
		switch (sdtype) {
		case DSC_SDTYPE_DDR:
			mtype = MEM_DDR;
			break;
		case DSC_SDTYPE_DDR2:
			mtype = MEM_DDR2;
			break;
		case DSC_SDTYPE_DDR3:
			mtype = MEM_DDR3;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	}

	for (index = 0; index < mci->nr_csrows; index++) {
		u32 start;
		u32 end;

		csrow = mci->csrows[index];
		dimm = csrow->channels[0]->dimm;

		cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
				  (index * MPC85XX_MC_CS_BNDS_OFS));

		start = (cs_bnds & 0xffff0000) >> 16;
		end = (cs_bnds & 0x0000ffff);

		if (start == end)
			continue;	/* not populated */

		start <<= (24 - PAGE_SHIFT);
		end <<= (24 - PAGE_SHIFT);
		end |= (1 << (24 - PAGE_SHIFT)) - 1;

		csrow->first_page = start;
		csrow->last_page = end;

		dimm->nr_pages = end + 1 - start;
		dimm->grain = 8;
		dimm->mtype = mtype;
		dimm->dtype = DEV_UNKNOWN;
		if (sdram_ctl & DSC_X32_EN)
			dimm->dtype = DEV_X32;
		dimm->edac_mode = EDAC_SECDED;
	}
}
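
/*
 * Probe one DDR memory controller: map its registers, bail out if ECC is
 * not enabled in DDR_SDRAM_CFG, describe the csrow/channel layout to the
 * EDAC core, clear and un-mask error reporting and, in interrupt mode,
 * set the single-bit error threshold to 1 and hook the error interrupt.
 */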
static int mpc85xx_mc_err_probe(struct platform_device *op)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct mpc85xx_mc_pdata *pdata;
	struct resource r;
	u32 sdram_ctl;
	int res;

	if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(*pdata));
	if (!mci) {
		devres_release_group(&op->dev, mpc85xx_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	pdata->name = "mpc85xx_mc_err";
	pdata->irq = NO_IRQ;
	mci->pdev = &op->dev;
	pdata->edac_idx = edac_mc_idx++;
	dev_set_drvdata(mci->pdev, mci);
	mci->ctl_name = pdata->name;
	mci->dev_name = pdata->name;

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
		       __func__);
		goto err;
	}

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->mc_vbase) {
		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
	if (!(sdram_ctl & DSC_ECC_EN)) {
		/* no ECC */
		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
	    MEM_FLAG_DDR | MEM_FLAG_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = MPC85XX_REVISION;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = mpc85xx_mc_check;

	mci->ctl_page_to_phys = NULL;
	mci->scrub_mode = SCRUB_SW_SRC;

	mpc85xx_init_csrows(mci);

	/* store the original error disable bits */
	orig_ddr_err_disable =
	    in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);

	/* clear all error bits */
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}
	if (mpc85xx_create_sysfs_attributes(mci)) {
		edac_mc_del_mc(mci->pdev);
		edac_dbg(3, "failed mpc85xx_create_sysfs_attributes()\n");
		goto err;
	}
	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
			 DDR_EIE_MBEE | DDR_EIE_SBEE);

		/* store the original error management threshold */
		orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
					   MPC85XX_MC_ERR_SBE) & 0xff0000;

		/* set threshold to 1 error per interrupt */
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);

		/* register interrupts */
		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       mpc85xx_mc_isr,
				       IRQF_SHARED,
				       "[EDAC] MC err", mci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MPC85xx DRAM ERR\n", __func__, pdata->irq);
			irq_dispose_mapping(pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
		       pdata->irq);
	}

	devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
	edac_dbg(3, "success\n");
	printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");

	return 0;

err2:
	edac_mc_del_mc(&op->dev);
err:
	devres_release_group(&op->dev, mpc85xx_mc_err_probe);
	edac_mc_free(mci);
	return res;
}
static int mpc85xx_mc_err_remove(struct platform_device *op)
{
	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
	struct mpc85xx_mc_pdata *pdata = mci->pvt_info;

	edac_dbg(0, "\n");

	if (edac_op_state == EDAC_OPSTATE_INT) {
		out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
		irq_dispose_mapping(pdata->irq);
	}

	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
		 orig_ddr_err_disable);
	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);

	mpc85xx_remove_sysfs_attributes(mci);

	edac_mc_del_mc(&op->dev);
	edac_mc_free(mci);
	return 0;
}
static struct of_device_id mpc85xx_mc_err_of_match[] = {
	/* deprecate the fsl,85.. forms in the future, 2.6.30? */
	{ .compatible = "fsl,8540-memory-controller", },
	{ .compatible = "fsl,8541-memory-controller", },
	{ .compatible = "fsl,8544-memory-controller", },
	{ .compatible = "fsl,8548-memory-controller", },
	{ .compatible = "fsl,8555-memory-controller", },
	{ .compatible = "fsl,8568-memory-controller", },
	{ .compatible = "fsl,mpc8536-memory-controller", },
	{ .compatible = "fsl,mpc8540-memory-controller", },
	{ .compatible = "fsl,mpc8541-memory-controller", },
	{ .compatible = "fsl,mpc8544-memory-controller", },
	{ .compatible = "fsl,mpc8548-memory-controller", },
	{ .compatible = "fsl,mpc8555-memory-controller", },
	{ .compatible = "fsl,mpc8560-memory-controller", },
	{ .compatible = "fsl,mpc8568-memory-controller", },
	{ .compatible = "fsl,mpc8569-memory-controller", },
	{ .compatible = "fsl,mpc8572-memory-controller", },
	{ .compatible = "fsl,mpc8349-memory-controller", },
	{ .compatible = "fsl,p1020-memory-controller", },
	{ .compatible = "fsl,p1021-memory-controller", },
	{ .compatible = "fsl,p2020-memory-controller", },
	{ .compatible = "fsl,qoriq-memory-controller", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);

static struct platform_driver mpc85xx_mc_err_driver = {
	.probe = mpc85xx_mc_err_probe,
	.remove = mpc85xx_mc_err_remove,
	.driver = {
		   .name = "mpc85xx_mc_err",
		   .of_match_table = mpc85xx_mc_err_of_match,
		   },
};
#ifdef CONFIG_FSL_SOC_BOOKE
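/*
 * Per the e500 core documentation, HID1[RFXE] escalates core_fault_in
 * assertions (e.g. from L2 or DDR uncorrectable errors) into machine
 * checks.  Clearing it here lets those errors arrive as error interrupts
 * that the EDAC handlers above can report; the original value is saved
 * per-CPU so it can be restored on module exit.
 */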
static void __init mpc85xx_mc_clear_rfxe(void *data)
{
	orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
	mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
}
#endif
static int __init mpc85xx_mc_init(void)
{
	int res = 0;
	u32 pvr = 0;

	printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
	       "(C) 2006 Montavista Software\n");

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	res = platform_driver_register(&mpc85xx_mc_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");

	res = platform_driver_register(&mpc85xx_l2_err_driver);
	if (res)
		printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");

#ifdef CONFIG_FSL_SOC_BOOKE
	pvr = mfspr(SPRN_PVR);

	if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
	    (PVR_VER(pvr) == PVR_VER_E500V2)) {
		/*
		 * need to clear HID1[RFXE] to disable machine check int
		 * so we can catch it
		 */
		if (edac_op_state == EDAC_OPSTATE_INT)
			on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
	}
#endif

	return 0;
}

module_init(mpc85xx_mc_init);
#ifdef CONFIG_FSL_SOC_BOOKE
static void __exit mpc85xx_mc_restore_hid1(void *data)
{
	mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
}
#endif

static void __exit mpc85xx_mc_exit(void)
{
#ifdef CONFIG_FSL_SOC_BOOKE
	u32 pvr = mfspr(SPRN_PVR);

	if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
	    (PVR_VER(pvr) == PVR_VER_E500V2)) {
		on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
	}
#endif
	platform_driver_unregister(&mpc85xx_l2_err_driver);
	platform_driver_unregister(&mpc85xx_mc_err_driver);
}

module_exit(mpc85xx_mc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");