thunderx_edac.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174
  1. /*
  2. * Cavium ThunderX memory controller kernel module
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright Cavium, Inc. (C) 2015-2017. All rights reserved.
  9. *
  10. */
  11. #include <linux/module.h>
  12. #include <linux/pci.h>
  13. #include <linux/edac.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/string.h>
  16. #include <linux/stop_machine.h>
  17. #include <linux/delay.h>
  18. #include <linux/sizes.h>
  19. #include <linux/atomic.h>
  20. #include <linux/bitfield.h>
  21. #include <linux/circ_buf.h>
  22. #include <asm/page.h>
  23. #include "edac_module.h"
/* Convert a physical address to its page-frame number */
#define phys_to_pfn(phys)	(PFN_DOWN(phys))

/* The NUMA node id lives in bits [45:44] of a ThunderX physical address */
#define THUNDERX_NODE		GENMASK(45, 44)

/* Severity classes used by struct error_descr::type */
enum {
	ERR_CORRECTED	= 1,
	ERR_UNCORRECTED	= 2,
	ERR_UNKNOWN	= 3,
};

#define MAX_SYNDROME_REGS 4

/* Raw snapshot of up to MAX_SYNDROME_REGS syndrome registers */
struct error_syndrome {
	u64 reg[MAX_SYNDROME_REGS];
};

/*
 * One decodable error bit: severity (ERR_*), the bit mask to test in the
 * interrupt/status register, and a human-readable description.
 * Tables of these are terminated by an all-zero sentinel entry.
 */
struct error_descr {
	int type;
	u64 mask;
	char *descr;
};
  40. static void decode_register(char *str, size_t size,
  41. const struct error_descr *descr,
  42. const uint64_t reg)
  43. {
  44. int ret = 0;
  45. while (descr->type && descr->mask && descr->descr) {
  46. if (reg & descr->mask) {
  47. ret = snprintf(str, size, "\n\t%s, %s",
  48. descr->type == ERR_CORRECTED ?
  49. "Corrected" : "Uncorrected",
  50. descr->descr);
  51. str += ret;
  52. size -= ret;
  53. }
  54. descr++;
  55. }
  56. }
  57. static unsigned long get_bits(unsigned long data, int pos, int width)
  58. {
  59. return (data >> pos) & ((1 << width) - 1);
  60. }
/* L2C control register (absolute physical address) and index-alias bit */
#define L2C_CTL			0x87E080800000
#define L2C_CTL_DISIDXALIAS	BIT(0)

#define PCI_DEVICE_ID_THUNDER_LMC 0xa022

/* LMC failing-address register and its bit fields */
#define LMC_FADR		0x20
#define LMC_FADR_FDIMM(x)	((x >> 37) & 0x1)
#define LMC_FADR_FBUNK(x)	((x >> 36) & 0x1)
#define LMC_FADR_FBANK(x)	((x >> 32) & 0xf)
#define LMC_FADR_FROW(x)	((x >> 14) & 0xffff)
#define LMC_FADR_FCOL(x)	((x >> 0) & 0x1fff)

#define LMC_NXM_FADR		0x28
#define LMC_ECC_SYND		0x38

#define LMC_ECC_PARITY_TEST	0x108

/* Interrupt registers: W1S = write-1-to-set, W1C = write-1-to-clear */
#define LMC_INT_W1S		0x150

#define LMC_INT_ENA_W1C		0x158
#define LMC_INT_ENA_W1S		0x160

/* LMC configuration register and address-decode fields */
#define LMC_CONFIG		0x188

#define LMC_CONFIG_BG2		BIT(62)
#define LMC_CONFIG_RANK_ENA	BIT(42)
#define LMC_CONFIG_PBANK_LSB(x)	(((x) >> 5) & 0xF)
#define LMC_CONFIG_ROW_LSB(x)	(((x) >> 2) & 0x7)

#define LMC_CONTROL		0x190
#define LMC_CONTROL_XOR_BANK	BIT(16)

/* LMC interrupt status register and its error bits */
#define LMC_INT			0x1F0

#define LMC_INT_DDR_ERR		BIT(11)
#define LMC_INT_DED_ERR		(0xFUL << 5)
#define LMC_INT_SEC_ERR		(0xFUL << 1)
#define LMC_INT_NXM_WR_MASK	BIT(0)

#define LMC_DDR_PLL_CTL		0x258
#define LMC_DDR_PLL_CTL_DDR4	BIT(29)

#define LMC_FADR_SCRAMBLED	0x330

/* Groupings reported as uncorrected (UE) vs. corrected (CE) events */
#define LMC_INT_UE		(LMC_INT_DDR_ERR | LMC_INT_DED_ERR | \
				 LMC_INT_NXM_WR_MASK)

#define LMC_INT_CE		(LMC_INT_SEC_ERR)
/*
 * Decode table for the LMC_INT error bits, consumed by decode_register().
 * Order is the order descriptions are appended to the message.
 */
static const struct error_descr lmc_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = LMC_INT_SEC_ERR,
		.descr = "Single-bit ECC error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = LMC_INT_DDR_ERR,
		.descr = "DDR chip error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = LMC_INT_DED_ERR,
		.descr = "Double-bit ECC error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = LMC_INT_NXM_WR_MASK,
		.descr = "Non-existent memory write",
	},
	{0, 0, NULL},	/* sentinel */
};
/* Interrupt-enable bits for LMC_INT_ENA_W1S / LMC_INT_ENA_W1C */
#define LMC_INT_EN_DDR_ERROR_ALERT_ENA	BIT(5)
#define LMC_INT_EN_DLCRAM_DED_ERR	BIT(4)
#define LMC_INT_EN_DLCRAM_SEC_ERR	BIT(3)
#define LMC_INT_INTR_DED_ENA		BIT(2)
#define LMC_INT_INTR_SEC_ENA		BIT(1)
#define LMC_INT_INTR_NXM_WR_ENA		BIT(0)

#define LMC_INT_ENA_ALL			GENMASK(5, 0)

/*
 * NOTE(review): LMC_DDR_PLL_CTL, LMC_DDR_PLL_CTL_DDR4 and LMC_CONTROL
 * below are identical redefinitions of the macros above (legal in C,
 * no warning) — candidates for removal.
 */
#define LMC_DDR_PLL_CTL		0x258
#define LMC_DDR_PLL_CTL_DDR4	BIT(29)

#define LMC_CONTROL		0x190
#define LMC_CONTROL_RDIMM	BIT(0)

#define LMC_SCRAM_FADR		0x330

/* ECC characterization masks used for error injection */
#define LMC_CHAR_MASK0		0x228
#define LMC_CHAR_MASK2		0x238

/* Number of entries in the error ring buffer; must be a power of two */
#define RING_ENTRIES	8
/* One debugfs node: name, permissions and its file operations */
struct debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations fops;
};

/* Snapshot of the LMC error registers, taken in hard-IRQ context */
struct lmc_err_ctx {
	u64 reg_int;		/* LMC_INT at interrupt time */
	u64 reg_fadr;		/* failing address */
	u64 reg_nxm_fadr;	/* non-existent-memory failing address */
	u64 reg_scram_fadr;	/* scrambled failing address */
	u64 reg_ecc_synd;	/* ECC syndrome */
};

/* Per-controller driver state (lives in mem_ctl_info::pvt_info) */
struct thunderx_lmc {
	void __iomem *regs;	/* BAR0 mapping of the LMC registers */
	struct pci_dev *pdev;
	struct msix_entry msix_ent;

	atomic_t ecc_int;	/* set by ISR, polled by the ECC injector */

	/* debugfs-settable injection parameters */
	u64 mask0;
	u64 mask2;
	u64 parity_test;
	u64 node;		/* NUMA/socket node of this controller */

	/* address-decode geometry, computed once at probe time */
	int xbits;
	int bank_width;
	int pbank_lsb;
	int dimm_lsb;
	int rank_lsb;
	int bank_lsb;
	int row_lsb;
	int col_hi_lsb;

	int xor_bank;		/* LMC_CONTROL_XOR_BANK is set */
	int l2c_alias;		/* L2C index aliasing is enabled */

	struct page *mem;	/* page used as the ECC-injection target */

	/* single-producer (hard IRQ) / single-consumer (thread) ring */
	struct lmc_err_ctx err_ctx[RING_ENTRIES];
	unsigned long ring_head;
	unsigned long ring_tail;
};
/* Ring index modulo @size; @size must be a power of two */
#define ring_pos(pos, size) ((pos) & (size - 1))

/*
 * Declare "static struct debugfs_entry debugfs_<_name>" wired to the
 * given write/read handlers (either may be NULL).
 */
#define DEBUGFS_STRUCT(_name, _mode, _write, _read)		    \
static struct debugfs_entry debugfs_##_name = {			    \
	.name = __stringify(_name),				    \
	.mode = VERIFY_OCTAL_PERMISSIONS(_mode),		    \
	.fops = {						    \
		.open = simple_open,				    \
		.write = _write,				    \
		.read = _read,					    \
		.llseek = generic_file_llseek,			    \
	},							    \
}

/*
 * Generate debugfs read/write handlers for a u64 field of the driver's
 * private data (struct thunderx_<_type>), plus its debugfs_entry.
 * NOTE(review): the read handler formats into a 20-byte buffer but uses
 * the caller's @count as the snprintf bound; the fixed-width format
 * "0x%016llx" emits at most 19 bytes, so this holds only while that
 * format is unchanged — verify before touching it.
 */
#define DEBUGFS_FIELD_ATTR(_type, _field)			    \
static ssize_t thunderx_##_type##_##_field##_read(struct file *file,	\
						  char __user *data,	\
						  size_t count, loff_t *ppos) \
{								    \
	struct thunderx_##_type *pdata = file->private_data;	    \
	char buf[20];						    \
								    \
	snprintf(buf, count, "0x%016llx", pdata->_field);	    \
	return simple_read_from_buffer(data, count, ppos,	    \
				       buf, sizeof(buf));	    \
}								    \
								    \
static ssize_t thunderx_##_type##_##_field##_write(struct file *file,	\
						   const char __user *data, \
						   size_t count, loff_t *ppos) \
{								    \
	struct thunderx_##_type *pdata = file->private_data;	    \
	int res;						    \
								    \
	res = kstrtoull_from_user(data, count, 0, &pdata->_field);  \
								    \
	return res ? res : count;				    \
}								    \
								    \
DEBUGFS_STRUCT(_field, 0600,					    \
	       thunderx_##_type##_##_field##_write,		    \
	       thunderx_##_type##_##_field##_read)		    \

/*
 * Same idea as DEBUGFS_FIELD_ATTR, but the value is a hardware register
 * at offset _reg, accessed through pdata->regs with readq()/writeq().
 */
#define DEBUGFS_REG_ATTR(_type, _name, _reg)			    \
static ssize_t thunderx_##_type##_##_name##_read(struct file *file,	\
						 char __user *data,	\
						 size_t count, loff_t *ppos) \
{								    \
	struct thunderx_##_type *pdata = file->private_data;	    \
	char buf[20];						    \
								    \
	sprintf(buf, "0x%016llx", readq(pdata->regs + _reg));	    \
	return simple_read_from_buffer(data, count, ppos,	    \
				       buf, sizeof(buf));	    \
}								    \
								    \
static ssize_t thunderx_##_type##_##_name##_write(struct file *file,	\
						  const char __user *data, \
						  size_t count, loff_t *ppos) \
{								    \
	struct thunderx_##_type *pdata = file->private_data;	    \
	u64 val;						    \
	int res;						    \
								    \
	res = kstrtoull_from_user(data, count, 0, &val);	    \
								    \
	if (!res) {						    \
		writeq(val, pdata->regs + _reg);		    \
		res = count;					    \
	}							    \
								    \
	return res;						    \
}								    \
								    \
DEBUGFS_STRUCT(_name, 0600,					    \
	       thunderx_##_type##_##_name##_write,		    \
	       thunderx_##_type##_##_name##_read)

#define LMC_DEBUGFS_ENT(_field)	DEBUGFS_FIELD_ATTR(lmc, _field)

/*
 * To get an ECC error injected, the following steps are needed:
 * - Setup the ECC injection by writing the appropriate parameters:
 *	echo <bit mask value> > /sys/kernel/debug/<device number>/ecc_mask0
 *	echo <bit mask value> > /sys/kernel/debug/<device number>/ecc_mask2
 *	echo 0x802 > /sys/kernel/debug/<device number>/ecc_parity_test
 * - Do the actual injection:
 *	echo 1 > /sys/kernel/debug/<device number>/inject_ecc
 */
  252. static ssize_t thunderx_lmc_inject_int_write(struct file *file,
  253. const char __user *data,
  254. size_t count, loff_t *ppos)
  255. {
  256. struct thunderx_lmc *lmc = file->private_data;
  257. u64 val;
  258. int res;
  259. res = kstrtoull_from_user(data, count, 0, &val);
  260. if (!res) {
  261. /* Trigger the interrupt */
  262. writeq(val, lmc->regs + LMC_INT_W1S);
  263. res = count;
  264. }
  265. return res;
  266. }
  267. static ssize_t thunderx_lmc_int_read(struct file *file,
  268. char __user *data,
  269. size_t count, loff_t *ppos)
  270. {
  271. struct thunderx_lmc *lmc = file->private_data;
  272. char buf[20];
  273. u64 lmc_int = readq(lmc->regs + LMC_INT);
  274. snprintf(buf, sizeof(buf), "0x%016llx", lmc_int);
  275. return simple_read_from_buffer(data, count, ppos, buf, sizeof(buf));
  276. }
#define TEST_PATTERN 0xa5

/*
 * Write a test pattern to the injection page and force it out to DRAM so
 * the masks programmed into LMC_CHAR_MASK0/2 corrupt the ECC on the way.
 * Runs under stop_machine(), so no other CPU perturbs the caches.
 */
static int inject_ecc_fn(void *arg)
{
	struct thunderx_lmc *lmc = arg;
	uintptr_t addr, phys;
	unsigned int cline_size = cache_line_size();
	const unsigned int lines = PAGE_SIZE / cline_size;
	unsigned int i, cl_idx;

	addr = (uintptr_t)page_address(lmc->mem);
	phys = (uintptr_t)page_to_phys(lmc->mem);

	/* Program the target cacheline index (phys bits 6:4) into bits 10:8 */
	cl_idx = (phys & 0x7f) >> 4;
	lmc->parity_test &= ~(7ULL << 8);
	lmc->parity_test |= (cl_idx << 8);

	writeq(lmc->mask0, lmc->regs + LMC_CHAR_MASK0);
	writeq(lmc->mask2, lmc->regs + LMC_CHAR_MASK2);
	writeq(lmc->parity_test, lmc->regs + LMC_ECC_PARITY_TEST);

	/* Read back so the register writes are posted before we dirty memory */
	readq(lmc->regs + LMC_CHAR_MASK0);
	readq(lmc->regs + LMC_CHAR_MASK2);
	readq(lmc->regs + LMC_ECC_PARITY_TEST);

	for (i = 0; i < lines; i++) {
		/*
		 * NOTE(review): the memset always targets the first
		 * cacheline of the page while the flush below walks the
		 * whole page — looks like it was meant to be
		 * (addr + i * cline_size); confirm against the intended
		 * injection coverage before changing.
		 */
		memset((void *)addr, TEST_PATTERN, cline_size);
		barrier();

		/*
		 * Flush L1 cachelines to the PoC (L2).
		 * This will cause cacheline eviction to the L2.
		 */
		asm volatile("dc civac, %0\n"
			     "dsb sy\n"
			     : : "r"(addr + i * cline_size));
	}

	for (i = 0; i < lines; i++) {
		/*
		 * Flush L2 cachelines to the DRAM.
		 * This will cause cacheline eviction to the DRAM
		 * and ECC corruption according to the masks set.
		 */
		__asm__ volatile("sys #0,c11,C1,#2, %0\n"
				 : : "r"(phys + i * cline_size));
	}

	for (i = 0; i < lines; i++) {
		/*
		 * Invalidate L2 cachelines.
		 * The subsequent load will cause cacheline fetch
		 * from the DRAM and an error interrupt
		 */
		__asm__ volatile("sys #0,c11,C1,#1, %0"
				 : : "r"(phys + i * cline_size));
	}

	for (i = 0; i < lines; i++) {
		/*
		 * Invalidate L1 cachelines.
		 * The subsequent load will cause cacheline fetch
		 * from the L2 and/or DRAM
		 */
		asm volatile("dc ivac, %0\n"
			     "dsb sy\n"
			     : : "r"(addr + i * cline_size));
	}

	return 0;
}
  337. static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
  338. const char __user *data,
  339. size_t count, loff_t *ppos)
  340. {
  341. struct thunderx_lmc *lmc = file->private_data;
  342. unsigned int cline_size = cache_line_size();
  343. u8 tmp[cline_size];
  344. void __iomem *addr;
  345. unsigned int offs, timeout = 100000;
  346. atomic_set(&lmc->ecc_int, 0);
  347. lmc->mem = alloc_pages_node(lmc->node, GFP_KERNEL, 0);
  348. if (!lmc->mem)
  349. return -ENOMEM;
  350. addr = page_address(lmc->mem);
  351. while (!atomic_read(&lmc->ecc_int) && timeout--) {
  352. stop_machine(inject_ecc_fn, lmc, NULL);
  353. for (offs = 0; offs < PAGE_SIZE; offs += sizeof(tmp)) {
  354. /*
  355. * Do a load from the previously rigged location
  356. * This should generate an error interrupt.
  357. */
  358. memcpy(tmp, addr + offs, cline_size);
  359. asm volatile("dsb ld\n");
  360. }
  361. }
  362. __free_pages(lmc->mem, 0);
  363. return count;
  364. }
  365. LMC_DEBUGFS_ENT(mask0);
  366. LMC_DEBUGFS_ENT(mask2);
  367. LMC_DEBUGFS_ENT(parity_test);
  368. DEBUGFS_STRUCT(inject_int, 0200, thunderx_lmc_inject_int_write, NULL);
  369. DEBUGFS_STRUCT(inject_ecc, 0200, thunderx_lmc_inject_ecc_write, NULL);
  370. DEBUGFS_STRUCT(int_w1c, 0400, NULL, thunderx_lmc_int_read);
  371. struct debugfs_entry *lmc_dfs_ents[] = {
  372. &debugfs_mask0,
  373. &debugfs_mask2,
  374. &debugfs_parity_test,
  375. &debugfs_inject_ecc,
  376. &debugfs_inject_int,
  377. &debugfs_int_w1c,
  378. };
  379. static int thunderx_create_debugfs_nodes(struct dentry *parent,
  380. struct debugfs_entry *attrs[],
  381. void *data,
  382. size_t num)
  383. {
  384. int i;
  385. struct dentry *ent;
  386. if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
  387. return 0;
  388. if (!parent)
  389. return -ENOENT;
  390. for (i = 0; i < num; i++) {
  391. ent = edac_debugfs_create_file(attrs[i]->name, attrs[i]->mode,
  392. parent, data, &attrs[i]->fops);
  393. if (!ent)
  394. break;
  395. }
  396. return i;
  397. }
/*
 * Reconstruct a physical address from the fields of an LMC failing-address
 * register, using the decode geometry computed at probe time.
 */
static phys_addr_t thunderx_faddr_to_phys(u64 faddr, struct thunderx_lmc *lmc)
{
	phys_addr_t addr = 0;
	int bank, xbits;

	/* Socket/node id occupies the high address bits */
	addr |= lmc->node << 40;
	addr |= LMC_FADR_FDIMM(faddr) << lmc->dimm_lsb;
	addr |= LMC_FADR_FBUNK(faddr) << lmc->rank_lsb;
	addr |= LMC_FADR_FROW(faddr) << lmc->row_lsb;
	/* Low 4 column bits index within a cacheline; drop them */
	addr |= (LMC_FADR_FCOL(faddr) >> 4) << lmc->col_hi_lsb;

	/*
	 * NOTE(review): @bank is shifted by bank_lsb both here and again
	 * when OR-ed into @addr below, and the XOR is applied to the
	 * already-shifted value — verify against the ThunderX address
	 * decode specification.
	 */
	bank = LMC_FADR_FBANK(faddr) << lmc->bank_lsb;

	if (lmc->xor_bank)
		bank ^= get_bits(addr, 12 + lmc->xbits, lmc->bank_width);

	addr |= bank << lmc->bank_lsb;

	xbits = PCI_FUNC(lmc->pdev->devfn);

	/* Undo L2C index aliasing if it is enabled */
	if (lmc->l2c_alias)
		xbits ^= get_bits(addr, 20, lmc->xbits) ^
			 get_bits(addr, 12, lmc->xbits);

	addr |= xbits << 7;

	return addr;
}
  418. static unsigned int thunderx_get_num_lmcs(unsigned int node)
  419. {
  420. unsigned int number = 0;
  421. struct pci_dev *pdev = NULL;
  422. do {
  423. pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
  424. PCI_DEVICE_ID_THUNDER_LMC,
  425. pdev);
  426. if (pdev) {
  427. #ifdef CONFIG_NUMA
  428. if (pdev->dev.numa_node == node)
  429. number++;
  430. #else
  431. number++;
  432. #endif
  433. }
  434. } while (pdev);
  435. return number;
  436. }
#define LMC_MESSAGE_SIZE	120
#define LMC_OTHER_SIZE		(50 * ARRAY_SIZE(lmc_errors))

/*
 * Hard-IRQ half of the LMC interrupt: snapshot the error registers into
 * the ring buffer and wake the threaded handler to do the reporting.
 */
static irqreturn_t thunderx_lmc_err_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct thunderx_lmc *lmc = mci->pvt_info;

	unsigned long head = ring_pos(lmc->ring_head, ARRAY_SIZE(lmc->err_ctx));
	struct lmc_err_ctx *ctx = &lmc->err_ctx[head];

	/* Disable any in-progress ECC injection before sampling state */
	writeq(0, lmc->regs + LMC_CHAR_MASK0);
	writeq(0, lmc->regs + LMC_CHAR_MASK2);
	writeq(0x2, lmc->regs + LMC_ECC_PARITY_TEST);

	ctx->reg_int = readq(lmc->regs + LMC_INT);
	ctx->reg_fadr = readq(lmc->regs + LMC_FADR);
	ctx->reg_nxm_fadr = readq(lmc->regs + LMC_NXM_FADR);
	ctx->reg_scram_fadr = readq(lmc->regs + LMC_SCRAM_FADR);
	ctx->reg_ecc_synd = readq(lmc->regs + LMC_ECC_SYND);

	lmc->ring_head++;

	/* Tell the injection loop in debugfs that the interrupt arrived */
	atomic_set(&lmc->ecc_int, 1);

	/* Clear the interrupt */
	writeq(ctx->reg_int, lmc->regs + LMC_INT);

	return IRQ_WAKE_THREAD;
}
/*
 * Threaded half of the LMC interrupt: drain the ring buffer filled by
 * thunderx_lmc_err_isr() and report each recorded event to the EDAC core.
 */
static irqreturn_t thunderx_lmc_threaded_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct thunderx_lmc *lmc = mci->pvt_info;
	phys_addr_t phys_addr;

	unsigned long tail;
	struct lmc_err_ctx *ctx;

	irqreturn_t ret = IRQ_NONE;

	char *msg;
	char *other;

	msg = kmalloc(LMC_MESSAGE_SIZE, GFP_KERNEL);
	other = kmalloc(LMC_OTHER_SIZE, GFP_KERNEL);

	/* kfree(NULL) at err_free is a no-op, so a partial failure is fine */
	if (!msg || !other)
		goto err_free;

	while (CIRC_CNT(lmc->ring_head, lmc->ring_tail,
		ARRAY_SIZE(lmc->err_ctx))) {
		tail = ring_pos(lmc->ring_tail, ARRAY_SIZE(lmc->err_ctx));

		ctx = &lmc->err_ctx[tail];

		dev_dbg(&lmc->pdev->dev, "LMC_INT: %016llx\n",
			ctx->reg_int);
		dev_dbg(&lmc->pdev->dev, "LMC_FADR: %016llx\n",
			ctx->reg_fadr);
		dev_dbg(&lmc->pdev->dev, "LMC_NXM_FADR: %016llx\n",
			ctx->reg_nxm_fadr);
		dev_dbg(&lmc->pdev->dev, "LMC_SCRAM_FADR: %016llx\n",
			ctx->reg_scram_fadr);
		dev_dbg(&lmc->pdev->dev, "LMC_ECC_SYND: %016llx\n",
			ctx->reg_ecc_synd);

		/* DIMM location decoded from the scrambled failing address */
		snprintf(msg, LMC_MESSAGE_SIZE,
			 "DIMM %lld rank %lld bank %lld row %lld col %lld",
			 LMC_FADR_FDIMM(ctx->reg_scram_fadr),
			 LMC_FADR_FBUNK(ctx->reg_scram_fadr),
			 LMC_FADR_FBANK(ctx->reg_scram_fadr),
			 LMC_FADR_FROW(ctx->reg_scram_fadr),
			 LMC_FADR_FCOL(ctx->reg_scram_fadr));

		decode_register(other, LMC_OTHER_SIZE, lmc_errors,
				ctx->reg_int);

		phys_addr = thunderx_faddr_to_phys(ctx->reg_fadr, lmc);

		if (ctx->reg_int & LMC_INT_UE)
			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					     phys_to_pfn(phys_addr),
					     offset_in_page(phys_addr),
					     0, -1, -1, -1, msg, other);
		else if (ctx->reg_int & LMC_INT_CE)
			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
					     phys_to_pfn(phys_addr),
					     offset_in_page(phys_addr),
					     0, -1, -1, -1, msg, other);

		lmc->ring_tail++;
	}

	ret = IRQ_HANDLED;

err_free:
	kfree(msg);
	kfree(other);

	return ret;
}
#ifdef CONFIG_PM
/* Legacy PCI PM hook: save config space and power the device down */
static int thunderx_lmc_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

/* Legacy PCI PM hook: power the device back up and restore its state */
static int thunderx_lmc_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	return 0;
}
#endif
/* PCI IDs matched by this driver: the ThunderX LMC memory controller */
static const struct pci_device_id thunderx_lmc_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_LMC) },
	{ 0, },
};
  535. static inline int pci_dev_to_mc_idx(struct pci_dev *pdev)
  536. {
  537. int node = dev_to_node(&pdev->dev);
  538. int ret = PCI_FUNC(pdev->devfn);
  539. ret += max(node, 0) << 3;
  540. return ret;
  541. }
  542. static int thunderx_lmc_probe(struct pci_dev *pdev,
  543. const struct pci_device_id *id)
  544. {
  545. struct thunderx_lmc *lmc;
  546. struct edac_mc_layer layer;
  547. struct mem_ctl_info *mci;
  548. u64 lmc_control, lmc_ddr_pll_ctl, lmc_config;
  549. int ret;
  550. u64 lmc_int;
  551. void *l2c_ioaddr;
  552. layer.type = EDAC_MC_LAYER_SLOT;
  553. layer.size = 2;
  554. layer.is_virt_csrow = false;
  555. ret = pcim_enable_device(pdev);
  556. if (ret) {
  557. dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
  558. return ret;
  559. }
  560. ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_lmc");
  561. if (ret) {
  562. dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
  563. return ret;
  564. }
  565. mci = edac_mc_alloc(pci_dev_to_mc_idx(pdev), 1, &layer,
  566. sizeof(struct thunderx_lmc));
  567. if (!mci)
  568. return -ENOMEM;
  569. mci->pdev = &pdev->dev;
  570. lmc = mci->pvt_info;
  571. pci_set_drvdata(pdev, mci);
  572. lmc->regs = pcim_iomap_table(pdev)[0];
  573. lmc_control = readq(lmc->regs + LMC_CONTROL);
  574. lmc_ddr_pll_ctl = readq(lmc->regs + LMC_DDR_PLL_CTL);
  575. lmc_config = readq(lmc->regs + LMC_CONFIG);
  576. if (lmc_control & LMC_CONTROL_RDIMM) {
  577. mci->mtype_cap = FIELD_GET(LMC_DDR_PLL_CTL_DDR4,
  578. lmc_ddr_pll_ctl) ?
  579. MEM_RDDR4 : MEM_RDDR3;
  580. } else {
  581. mci->mtype_cap = FIELD_GET(LMC_DDR_PLL_CTL_DDR4,
  582. lmc_ddr_pll_ctl) ?
  583. MEM_DDR4 : MEM_DDR3;
  584. }
  585. mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
  586. mci->edac_cap = EDAC_FLAG_SECDED;
  587. mci->mod_name = "thunderx-lmc";
  588. mci->mod_ver = "1";
  589. mci->ctl_name = "thunderx-lmc";
  590. mci->dev_name = dev_name(&pdev->dev);
  591. mci->scrub_mode = SCRUB_NONE;
  592. lmc->pdev = pdev;
  593. lmc->msix_ent.entry = 0;
  594. lmc->ring_head = 0;
  595. lmc->ring_tail = 0;
  596. ret = pci_enable_msix_exact(pdev, &lmc->msix_ent, 1);
  597. if (ret) {
  598. dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
  599. goto err_free;
  600. }
  601. ret = devm_request_threaded_irq(&pdev->dev, lmc->msix_ent.vector,
  602. thunderx_lmc_err_isr,
  603. thunderx_lmc_threaded_isr, 0,
  604. "[EDAC] ThunderX LMC", mci);
  605. if (ret) {
  606. dev_err(&pdev->dev, "Cannot set ISR: %d\n", ret);
  607. goto err_free;
  608. }
  609. lmc->node = FIELD_GET(THUNDERX_NODE, pci_resource_start(pdev, 0));
  610. lmc->xbits = thunderx_get_num_lmcs(lmc->node) >> 1;
  611. lmc->bank_width = (FIELD_GET(LMC_DDR_PLL_CTL_DDR4, lmc_ddr_pll_ctl) &&
  612. FIELD_GET(LMC_CONFIG_BG2, lmc_config)) ? 4 : 3;
  613. lmc->pbank_lsb = (lmc_config >> 5) & 0xf;
  614. lmc->dimm_lsb = 28 + lmc->pbank_lsb + lmc->xbits;
  615. lmc->rank_lsb = lmc->dimm_lsb;
  616. lmc->rank_lsb -= FIELD_GET(LMC_CONFIG_RANK_ENA, lmc_config) ? 1 : 0;
  617. lmc->bank_lsb = 7 + lmc->xbits;
  618. lmc->row_lsb = 14 + LMC_CONFIG_ROW_LSB(lmc_config) + lmc->xbits;
  619. lmc->col_hi_lsb = lmc->bank_lsb + lmc->bank_width;
  620. lmc->xor_bank = lmc_control & LMC_CONTROL_XOR_BANK;
  621. l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node),
  622. PAGE_SIZE);
  623. if (!l2c_ioaddr) {
  624. dev_err(&pdev->dev, "Cannot map L2C_CTL\n");
  625. goto err_free;
  626. }
  627. lmc->l2c_alias = !(readq(l2c_ioaddr) & L2C_CTL_DISIDXALIAS);
  628. iounmap(l2c_ioaddr);
  629. ret = edac_mc_add_mc(mci);
  630. if (ret) {
  631. dev_err(&pdev->dev, "Cannot add the MC: %d\n", ret);
  632. goto err_free;
  633. }
  634. lmc_int = readq(lmc->regs + LMC_INT);
  635. writeq(lmc_int, lmc->regs + LMC_INT);
  636. writeq(LMC_INT_ENA_ALL, lmc->regs + LMC_INT_ENA_W1S);
  637. if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
  638. ret = thunderx_create_debugfs_nodes(mci->debugfs,
  639. lmc_dfs_ents,
  640. lmc,
  641. ARRAY_SIZE(lmc_dfs_ents));
  642. if (ret != ARRAY_SIZE(lmc_dfs_ents)) {
  643. dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
  644. ret, ret >= 0 ? " created" : "");
  645. }
  646. }
  647. return 0;
  648. err_free:
  649. pci_set_drvdata(pdev, NULL);
  650. edac_mc_free(mci);
  651. return ret;
  652. }
  653. static void thunderx_lmc_remove(struct pci_dev *pdev)
  654. {
  655. struct mem_ctl_info *mci = pci_get_drvdata(pdev);
  656. struct thunderx_lmc *lmc = mci->pvt_info;
  657. writeq(LMC_INT_ENA_ALL, lmc->regs + LMC_INT_ENA_W1C);
  658. edac_mc_del_mc(&pdev->dev);
  659. edac_mc_free(mci);
  660. }
MODULE_DEVICE_TABLE(pci, thunderx_lmc_pci_tbl);

/* PCI driver glue for the LMC EDAC instances */
static struct pci_driver thunderx_lmc_driver = {
	.name     = "thunderx_lmc_edac",
	.probe    = thunderx_lmc_probe,
	.remove   = thunderx_lmc_remove,
#ifdef CONFIG_PM
	.suspend  = thunderx_lmc_suspend,
	.resume   = thunderx_lmc_resume,
#endif
	.id_table = thunderx_lmc_pci_tbl,
};
/*---------------------- OCX driver ---------------------------------*/

#define PCI_DEVICE_ID_THUNDER_OCX 0xa013

#define OCX_LINK_INTS		3
#define OCX_INTS		(OCX_LINK_INTS + 1)
#define OCX_RX_LANES		24
#define OCX_RX_LANE_STATS	15

/* OCX common interrupt registers (W1S = set, ENA_W1S/W1C = enable bits) */
#define OCX_COM_INT		0x100
#define OCX_COM_INT_W1S		0x108
#define OCX_COM_INT_ENA_W1S	0x110
#define OCX_COM_INT_ENA_W1C	0x118

/* OCX_COM_INT error bits */
#define OCX_COM_IO_BADID	BIT(54)
#define OCX_COM_MEM_BADID	BIT(53)
#define OCX_COM_COPR_BADID	BIT(52)
#define OCX_COM_WIN_REQ_BADID	BIT(51)
#define OCX_COM_WIN_REQ_TOUT	BIT(50)
#define OCX_COM_RX_LANE		GENMASK(23, 0)

/* Bad-node-id and timeout events are all reported as corrected */
#define OCX_COM_INT_CE		(OCX_COM_IO_BADID      | \
				 OCX_COM_MEM_BADID     | \
				 OCX_COM_COPR_BADID    | \
				 OCX_COM_WIN_REQ_BADID | \
				 OCX_COM_WIN_REQ_TOUT)
/*
 * Decode table for OCX_COM_INT error bits, consumed by decode_register().
 * Terminated by the all-zero sentinel entry.
 */
static const struct error_descr ocx_com_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_IO_BADID,
		.descr = "Invalid IO transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_MEM_BADID,
		.descr = "Invalid memory transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_COPR_BADID,
		.descr = "Invalid coprocessor transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_WIN_REQ_BADID,
		.descr = "Invalid SLI transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_WIN_REQ_TOUT,
		.descr = "Window/core request timeout",
	},
	{0, 0, NULL},	/* sentinel */
};
/* Per-link interrupt registers, one set per CCPI link (x = 0..2). */
#define OCX_COM_LINKX_INT(x)		(0x120 + (x) * 8)
#define OCX_COM_LINKX_INT_W1S(x)	(0x140 + (x) * 8)
#define OCX_COM_LINKX_INT_ENA_W1S(x)	(0x160 + (x) * 8)
#define OCX_COM_LINKX_INT_ENA_W1C(x)	(0x180 + (x) * 8)

/* OCX_COM_LINKX_INT cause bits. */
#define OCX_COM_LINK_BAD_WORD		BIT(13)
#define OCX_COM_LINK_ALIGN_FAIL		BIT(12)
#define OCX_COM_LINK_ALIGN_DONE		BIT(11)
#define OCX_COM_LINK_UP			BIT(10)
#define OCX_COM_LINK_STOP		BIT(9)
#define OCX_COM_LINK_BLK_ERR		BIT(8)
#define OCX_COM_LINK_REINIT		BIT(7)
#define OCX_COM_LINK_LNK_DATA		BIT(6)
#define OCX_COM_LINK_RXFIFO_DBE		BIT(5)
#define OCX_COM_LINK_RXFIFO_SBE		BIT(4)
#define OCX_COM_LINK_TXFIFO_DBE		BIT(3)
#define OCX_COM_LINK_TXFIFO_SBE		BIT(2)
#define OCX_COM_LINK_REPLAY_DBE		BIT(1)
#define OCX_COM_LINK_REPLAY_SBE		BIT(0)

/* Decode table for per-link interrupts; zero sentinel terminated. */
static const struct error_descr ocx_com_link_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_REPLAY_SBE,
		.descr = "Replay buffer single-bit error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_TXFIFO_SBE,
		.descr = "TX FIFO single-bit error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_RXFIFO_SBE,
		.descr = "RX FIFO single-bit error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_BLK_ERR,
		.descr = "Block code error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_ALIGN_FAIL,
		.descr = "Link alignment failure",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_BAD_WORD,
		.descr = "Bad code word",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = OCX_COM_LINK_REPLAY_DBE,
		.descr = "Replay buffer double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = OCX_COM_LINK_TXFIFO_DBE,
		.descr = "TX FIFO double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = OCX_COM_LINK_RXFIFO_DBE,
		.descr = "RX FIFO double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = OCX_COM_LINK_STOP,
		.descr = "Link stopped",
	},
	{0, 0, NULL},
};

/* Per-link causes classified as uncorrected errors. */
#define OCX_COM_LINK_INT_UE	(OCX_COM_LINK_REPLAY_DBE | \
				 OCX_COM_LINK_TXFIFO_DBE | \
				 OCX_COM_LINK_RXFIFO_DBE | \
				 OCX_COM_LINK_STOP)

/* Per-link causes classified as corrected errors. */
#define OCX_COM_LINK_INT_CE	(OCX_COM_LINK_REPLAY_SBE | \
				 OCX_COM_LINK_TXFIFO_SBE | \
				 OCX_COM_LINK_RXFIFO_SBE | \
				 OCX_COM_LINK_BLK_ERR    | \
				 OCX_COM_LINK_ALIGN_FAIL | \
				 OCX_COM_LINK_BAD_WORD)
/* Per-lane register blocks, 0x100 bytes apart (x = 0..23). */
#define OCX_LNE_INT(x)			(0x8018 + (x) * 0x100)
#define OCX_LNE_INT_EN(x)		(0x8020 + (x) * 0x100)
#define OCX_LNE_BAD_CNT(x)		(0x8028 + (x) * 0x100)
#define OCX_LNE_CFG(x)			(0x8000 + (x) * 0x100)
/* Lane statistics register y (0..14) for lane x. */
#define OCX_LNE_STAT(x, y)		(0x8040 + (x) * 0x100 + (y) * 8)

/* OCX_LNE_CFG control bits. */
#define OCX_LNE_CFG_RX_BDRY_LOCK_DIS	BIT(8)
#define OCX_LNE_CFG_RX_STAT_WRAP_DIS	BIT(2)
#define OCX_LNE_CFG_RX_STAT_RDCLR	BIT(1)
#define OCX_LNE_CFG_RX_STAT_ENA		BIT(0)

/* OCX_LNE_INT cause bits. */
#define OCX_LANE_BAD_64B67B		BIT(8)
#define OCX_LANE_DSKEW_FIFO_OVFL	BIT(5)
#define OCX_LANE_SCRM_SYNC_LOSS		BIT(4)
#define OCX_LANE_UKWN_CNTL_WORD		BIT(3)
#define OCX_LANE_CRC32_ERR		BIT(2)
#define OCX_LANE_BDRY_SYNC_LOSS		BIT(1)
#define OCX_LANE_SERDES_LOCK_LOSS	BIT(0)

/* No lane-level cause is treated as uncorrected. */
#define OCX_COM_LANE_INT_UE	(0)
#define OCX_COM_LANE_INT_CE	(OCX_LANE_SERDES_LOCK_LOSS | \
				 OCX_LANE_BDRY_SYNC_LOSS   | \
				 OCX_LANE_CRC32_ERR        | \
				 OCX_LANE_UKWN_CNTL_WORD   | \
				 OCX_LANE_SCRM_SYNC_LOSS   | \
				 OCX_LANE_DSKEW_FIFO_OVFL  | \
				 OCX_LANE_BAD_64B67B)

/* Decode table for per-lane interrupts; zero sentinel terminated. */
static const struct error_descr ocx_lane_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_SERDES_LOCK_LOSS,
		.descr = "RX SerDes lock lost",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_BDRY_SYNC_LOSS,
		.descr = "RX word boundary lost",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_CRC32_ERR,
		.descr = "CRC32 error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_UKWN_CNTL_WORD,
		.descr = "Unknown control word",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_SCRM_SYNC_LOSS,
		.descr = "Scrambler synchronization lost",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_DSKEW_FIFO_OVFL,
		.descr = "RX deskew FIFO overflow",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_BAD_64B67B,
		.descr = "Bad 64B/67B codeword",
	},
	{0, 0, NULL},
};
/* "Enable everything" masks for the three interrupt levels. */
#define OCX_LNE_INT_ENA_ALL		(GENMASK(9, 8) | GENMASK(6, 0))
#define OCX_COM_INT_ENA_ALL		(GENMASK(54, 50) | GENMASK(23, 0))
#define OCX_COM_LINKX_INT_ENA_ALL	(GENMASK(13, 12) | \
					 GENMASK(9, 7) | GENMASK(5, 0))

/* TX/RX link ECC control registers (debugfs error injection). */
#define OCX_TLKX_ECC_CTL(x)		(0x10018 + (x) * 0x2000)
#define OCX_RLKX_ECC_CTL(x)		(0x18018 + (x) * 0x2000)

/* Snapshot of one common-interrupt event, captured in hard-IRQ context. */
struct ocx_com_err_ctx {
	u64 reg_com_int;
	u64 reg_lane_int[OCX_RX_LANES];
	u64 reg_lane_stat11[OCX_RX_LANES];
};

/* Snapshot of one per-link interrupt event. */
struct ocx_link_err_ctx {
	u64 reg_com_link_int;
	int link;	/* which CCPI link (MSI-X entry index) raised it */
};

/* Driver state for one OCX device. */
struct thunderx_ocx {
	void __iomem *regs;
	int com_link;
	struct pci_dev *pdev;
	struct edac_device_ctl_info *edac_dev;

	struct dentry *debugfs;
	struct msix_entry msix_ent[OCX_INTS];

	/* Hard-IRQ producer / threaded-IRQ consumer rings. */
	struct ocx_com_err_ctx com_err_ctx[RING_ENTRIES];
	struct ocx_link_err_ctx link_err_ctx[RING_ENTRIES];

	unsigned long com_ring_head;
	unsigned long com_ring_tail;

	unsigned long link_ring_head;
	unsigned long link_ring_tail;
};

/* Scratch buffer sizes used when formatting error report strings. */
#define OCX_MESSAGE_SIZE	SZ_1K
#define OCX_OTHER_SIZE		(50 * ARRAY_SIZE(ocx_com_link_errors))
/* This handler is threaded */
static irqreturn_t thunderx_ocx_com_isr(int irq, void *irq_id)
{
	struct msix_entry *msix = irq_id;
	struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
						msix_ent[msix->entry]);
	int lane;
	/* Next free producer slot in the common-error ring. */
	unsigned long head = ring_pos(ocx->com_ring_head,
				      ARRAY_SIZE(ocx->com_err_ctx));
	struct ocx_com_err_ctx *ctx = &ocx->com_err_ctx[head];

	/* Snapshot cause and per-lane state for the threaded handler. */
	ctx->reg_com_int = readq(ocx->regs + OCX_COM_INT);

	for (lane = 0; lane < OCX_RX_LANES; lane++) {
		ctx->reg_lane_int[lane] =
			readq(ocx->regs + OCX_LNE_INT(lane));
		ctx->reg_lane_stat11[lane] =
			readq(ocx->regs + OCX_LNE_STAT(lane, 11));

		/* Acknowledge exactly the lane causes we captured (W1C). */
		writeq(ctx->reg_lane_int[lane], ocx->regs + OCX_LNE_INT(lane));
	}

	/* Acknowledge the captured common causes (W1C). */
	writeq(ctx->reg_com_int, ocx->regs + OCX_COM_INT);

	ocx->com_ring_head++;

	return IRQ_WAKE_THREAD;
}
  917. static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
  918. {
  919. struct msix_entry *msix = irq_id;
  920. struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
  921. msix_ent[msix->entry]);
  922. irqreturn_t ret = IRQ_NONE;
  923. unsigned long tail;
  924. struct ocx_com_err_ctx *ctx;
  925. int lane;
  926. char *msg;
  927. char *other;
  928. msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
  929. other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
  930. if (!msg || !other)
  931. goto err_free;
  932. while (CIRC_CNT(ocx->com_ring_head, ocx->com_ring_tail,
  933. ARRAY_SIZE(ocx->com_err_ctx))) {
  934. tail = ring_pos(ocx->com_ring_tail,
  935. ARRAY_SIZE(ocx->com_err_ctx));
  936. ctx = &ocx->com_err_ctx[tail];
  937. snprintf(msg, OCX_MESSAGE_SIZE, "%s: OCX_COM_INT: %016llx",
  938. ocx->edac_dev->ctl_name, ctx->reg_com_int);
  939. decode_register(other, OCX_OTHER_SIZE,
  940. ocx_com_errors, ctx->reg_com_int);
  941. strncat(msg, other, OCX_MESSAGE_SIZE);
  942. for (lane = 0; lane < OCX_RX_LANES; lane++)
  943. if (ctx->reg_com_int & BIT(lane)) {
  944. snprintf(other, OCX_OTHER_SIZE,
  945. "\n\tOCX_LNE_INT[%02d]: %016llx OCX_LNE_STAT11[%02d]: %016llx",
  946. lane, ctx->reg_lane_int[lane],
  947. lane, ctx->reg_lane_stat11[lane]);
  948. strncat(msg, other, OCX_MESSAGE_SIZE);
  949. decode_register(other, OCX_OTHER_SIZE,
  950. ocx_lane_errors,
  951. ctx->reg_lane_int[lane]);
  952. strncat(msg, other, OCX_MESSAGE_SIZE);
  953. }
  954. if (ctx->reg_com_int & OCX_COM_INT_CE)
  955. edac_device_handle_ce(ocx->edac_dev, 0, 0, msg);
  956. ocx->com_ring_tail++;
  957. }
  958. ret = IRQ_HANDLED;
  959. err_free:
  960. kfree(other);
  961. kfree(msg);
  962. return ret;
  963. }
/*
 * Hard-IRQ half of the per-link interrupt handler (threaded).
 * Snapshots the link cause register into the link ring, acknowledges
 * it (W1C), and defers reporting to the threaded handler.
 */
static irqreturn_t thunderx_ocx_lnk_isr(int irq, void *irq_id)
{
	struct msix_entry *msix = irq_id;
	struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
						msix_ent[msix->entry]);
	/* Next free producer slot in the link-error ring. */
	unsigned long head = ring_pos(ocx->link_ring_head,
				      ARRAY_SIZE(ocx->link_err_ctx));
	struct ocx_link_err_ctx *ctx = &ocx->link_err_ctx[head];

	/* MSI-X entry index identifies which link fired. */
	ctx->link = msix->entry;
	ctx->reg_com_link_int = readq(ocx->regs + OCX_COM_LINKX_INT(ctx->link));

	/* Acknowledge exactly the causes we captured (W1C). */
	writeq(ctx->reg_com_link_int, ocx->regs + OCX_COM_LINKX_INT(ctx->link));

	ocx->link_ring_head++;

	return IRQ_WAKE_THREAD;
}
  978. static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
  979. {
  980. struct msix_entry *msix = irq_id;
  981. struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
  982. msix_ent[msix->entry]);
  983. irqreturn_t ret = IRQ_NONE;
  984. unsigned long tail;
  985. struct ocx_link_err_ctx *ctx;
  986. char *msg;
  987. char *other;
  988. msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
  989. other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
  990. if (!msg || !other)
  991. goto err_free;
  992. while (CIRC_CNT(ocx->link_ring_head, ocx->link_ring_tail,
  993. ARRAY_SIZE(ocx->link_err_ctx))) {
  994. tail = ring_pos(ocx->link_ring_head,
  995. ARRAY_SIZE(ocx->link_err_ctx));
  996. ctx = &ocx->link_err_ctx[tail];
  997. snprintf(msg, OCX_MESSAGE_SIZE,
  998. "%s: OCX_COM_LINK_INT[%d]: %016llx",
  999. ocx->edac_dev->ctl_name,
  1000. ctx->link, ctx->reg_com_link_int);
  1001. decode_register(other, OCX_OTHER_SIZE,
  1002. ocx_com_link_errors, ctx->reg_com_link_int);
  1003. strncat(msg, other, OCX_MESSAGE_SIZE);
  1004. if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
  1005. edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
  1006. else if (ctx->reg_com_link_int & OCX_COM_LINK_INT_CE)
  1007. edac_device_handle_ce(ocx->edac_dev, 0, 0, msg);
  1008. ocx->link_ring_tail++;
  1009. }
  1010. ret = IRQ_HANDLED;
  1011. err_free:
  1012. kfree(other);
  1013. kfree(msg);
  1014. return ret;
  1015. }
/* debugfs attributes for error injection / register poking. */
#define OCX_DEBUGFS_ATTR(_name, _reg)	DEBUGFS_REG_ATTR(ocx, _name, _reg)

OCX_DEBUGFS_ATTR(tlk0_ecc_ctl, OCX_TLKX_ECC_CTL(0));
OCX_DEBUGFS_ATTR(tlk1_ecc_ctl, OCX_TLKX_ECC_CTL(1));
OCX_DEBUGFS_ATTR(tlk2_ecc_ctl, OCX_TLKX_ECC_CTL(2));

OCX_DEBUGFS_ATTR(rlk0_ecc_ctl, OCX_RLKX_ECC_CTL(0));
OCX_DEBUGFS_ATTR(rlk1_ecc_ctl, OCX_RLKX_ECC_CTL(1));
OCX_DEBUGFS_ATTR(rlk2_ecc_ctl, OCX_RLKX_ECC_CTL(2));

/* W1S registers: writing here injects/raises the interrupt. */
OCX_DEBUGFS_ATTR(com_link0_int, OCX_COM_LINKX_INT_W1S(0));
OCX_DEBUGFS_ATTR(com_link1_int, OCX_COM_LINKX_INT_W1S(1));
OCX_DEBUGFS_ATTR(com_link2_int, OCX_COM_LINKX_INT_W1S(2));

/* Per-lane bad-block counters, one per RX lane (0..23). */
OCX_DEBUGFS_ATTR(lne00_badcnt, OCX_LNE_BAD_CNT(0));
OCX_DEBUGFS_ATTR(lne01_badcnt, OCX_LNE_BAD_CNT(1));
OCX_DEBUGFS_ATTR(lne02_badcnt, OCX_LNE_BAD_CNT(2));
OCX_DEBUGFS_ATTR(lne03_badcnt, OCX_LNE_BAD_CNT(3));
OCX_DEBUGFS_ATTR(lne04_badcnt, OCX_LNE_BAD_CNT(4));
OCX_DEBUGFS_ATTR(lne05_badcnt, OCX_LNE_BAD_CNT(5));
OCX_DEBUGFS_ATTR(lne06_badcnt, OCX_LNE_BAD_CNT(6));
OCX_DEBUGFS_ATTR(lne07_badcnt, OCX_LNE_BAD_CNT(7));
OCX_DEBUGFS_ATTR(lne08_badcnt, OCX_LNE_BAD_CNT(8));
OCX_DEBUGFS_ATTR(lne09_badcnt, OCX_LNE_BAD_CNT(9));
OCX_DEBUGFS_ATTR(lne10_badcnt, OCX_LNE_BAD_CNT(10));
OCX_DEBUGFS_ATTR(lne11_badcnt, OCX_LNE_BAD_CNT(11));
OCX_DEBUGFS_ATTR(lne12_badcnt, OCX_LNE_BAD_CNT(12));
OCX_DEBUGFS_ATTR(lne13_badcnt, OCX_LNE_BAD_CNT(13));
OCX_DEBUGFS_ATTR(lne14_badcnt, OCX_LNE_BAD_CNT(14));
OCX_DEBUGFS_ATTR(lne15_badcnt, OCX_LNE_BAD_CNT(15));
OCX_DEBUGFS_ATTR(lne16_badcnt, OCX_LNE_BAD_CNT(16));
OCX_DEBUGFS_ATTR(lne17_badcnt, OCX_LNE_BAD_CNT(17));
OCX_DEBUGFS_ATTR(lne18_badcnt, OCX_LNE_BAD_CNT(18));
OCX_DEBUGFS_ATTR(lne19_badcnt, OCX_LNE_BAD_CNT(19));
OCX_DEBUGFS_ATTR(lne20_badcnt, OCX_LNE_BAD_CNT(20));
OCX_DEBUGFS_ATTR(lne21_badcnt, OCX_LNE_BAD_CNT(21));
OCX_DEBUGFS_ATTR(lne22_badcnt, OCX_LNE_BAD_CNT(22));
OCX_DEBUGFS_ATTR(lne23_badcnt, OCX_LNE_BAD_CNT(23));

OCX_DEBUGFS_ATTR(com_int, OCX_COM_INT_W1S);

/* All OCX debugfs entries, registered together at probe time. */
struct debugfs_entry *ocx_dfs_ents[] = {
	&debugfs_tlk0_ecc_ctl,
	&debugfs_tlk1_ecc_ctl,
	&debugfs_tlk2_ecc_ctl,
	&debugfs_rlk0_ecc_ctl,
	&debugfs_rlk1_ecc_ctl,
	&debugfs_rlk2_ecc_ctl,
	&debugfs_com_link0_int,
	&debugfs_com_link1_int,
	&debugfs_com_link2_int,
	&debugfs_lne00_badcnt,
	&debugfs_lne01_badcnt,
	&debugfs_lne02_badcnt,
	&debugfs_lne03_badcnt,
	&debugfs_lne04_badcnt,
	&debugfs_lne05_badcnt,
	&debugfs_lne06_badcnt,
	&debugfs_lne07_badcnt,
	&debugfs_lne08_badcnt,
	&debugfs_lne09_badcnt,
	&debugfs_lne10_badcnt,
	&debugfs_lne11_badcnt,
	&debugfs_lne12_badcnt,
	&debugfs_lne13_badcnt,
	&debugfs_lne14_badcnt,
	&debugfs_lne15_badcnt,
	&debugfs_lne16_badcnt,
	&debugfs_lne17_badcnt,
	&debugfs_lne18_badcnt,
	&debugfs_lne19_badcnt,
	&debugfs_lne20_badcnt,
	&debugfs_lne21_badcnt,
	&debugfs_lne22_badcnt,
	&debugfs_lne23_badcnt,
	&debugfs_com_int,
};
/* PCI IDs this OCX driver binds to. */
static const struct pci_device_id thunderx_ocx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_OCX) },
	{ 0, },
};
  1091. static void thunderx_ocx_clearstats(struct thunderx_ocx *ocx)
  1092. {
  1093. int lane, stat, cfg;
  1094. for (lane = 0; lane < OCX_RX_LANES; lane++) {
  1095. cfg = readq(ocx->regs + OCX_LNE_CFG(lane));
  1096. cfg |= OCX_LNE_CFG_RX_STAT_RDCLR;
  1097. cfg &= ~OCX_LNE_CFG_RX_STAT_ENA;
  1098. writeq(cfg, ocx->regs + OCX_LNE_CFG(lane));
  1099. for (stat = 0; stat < OCX_RX_LANE_STATS; stat++)
  1100. readq(ocx->regs + OCX_LNE_STAT(lane, stat));
  1101. }
  1102. }
  1103. static int thunderx_ocx_probe(struct pci_dev *pdev,
  1104. const struct pci_device_id *id)
  1105. {
  1106. struct thunderx_ocx *ocx;
  1107. struct edac_device_ctl_info *edac_dev;
  1108. char name[32];
  1109. int idx;
  1110. int i;
  1111. int ret;
  1112. u64 reg;
  1113. ret = pcim_enable_device(pdev);
  1114. if (ret) {
  1115. dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
  1116. return ret;
  1117. }
  1118. ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_ocx");
  1119. if (ret) {
  1120. dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
  1121. return ret;
  1122. }
  1123. idx = edac_device_alloc_index();
  1124. snprintf(name, sizeof(name), "OCX%d", idx);
  1125. edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_ocx),
  1126. name, 1, "CCPI", 1,
  1127. 0, NULL, 0, idx);
  1128. if (!edac_dev) {
  1129. dev_err(&pdev->dev, "Cannot allocate EDAC device: %d\n", ret);
  1130. return -ENOMEM;
  1131. }
  1132. ocx = edac_dev->pvt_info;
  1133. ocx->edac_dev = edac_dev;
  1134. ocx->com_ring_head = 0;
  1135. ocx->com_ring_tail = 0;
  1136. ocx->link_ring_head = 0;
  1137. ocx->link_ring_tail = 0;
  1138. ocx->regs = pcim_iomap_table(pdev)[0];
  1139. if (!ocx->regs) {
  1140. dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
  1141. ret = -ENODEV;
  1142. goto err_free;
  1143. }
  1144. ocx->pdev = pdev;
  1145. for (i = 0; i < OCX_INTS; i++) {
  1146. ocx->msix_ent[i].entry = i;
  1147. ocx->msix_ent[i].vector = 0;
  1148. }
  1149. ret = pci_enable_msix_exact(pdev, ocx->msix_ent, OCX_INTS);
  1150. if (ret) {
  1151. dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
  1152. goto err_free;
  1153. }
  1154. for (i = 0; i < OCX_INTS; i++) {
  1155. ret = devm_request_threaded_irq(&pdev->dev,
  1156. ocx->msix_ent[i].vector,
  1157. (i == 3) ?
  1158. thunderx_ocx_com_isr :
  1159. thunderx_ocx_lnk_isr,
  1160. (i == 3) ?
  1161. thunderx_ocx_com_threaded_isr :
  1162. thunderx_ocx_lnk_threaded_isr,
  1163. 0, "[EDAC] ThunderX OCX",
  1164. &ocx->msix_ent[i]);
  1165. if (ret)
  1166. goto err_free;
  1167. }
  1168. edac_dev->dev = &pdev->dev;
  1169. edac_dev->dev_name = dev_name(&pdev->dev);
  1170. edac_dev->mod_name = "thunderx-ocx";
  1171. edac_dev->ctl_name = "thunderx-ocx";
  1172. ret = edac_device_add_device(edac_dev);
  1173. if (ret) {
  1174. dev_err(&pdev->dev, "Cannot add EDAC device: %d\n", ret);
  1175. goto err_free;
  1176. }
  1177. if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
  1178. ocx->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
  1179. ret = thunderx_create_debugfs_nodes(ocx->debugfs,
  1180. ocx_dfs_ents,
  1181. ocx,
  1182. ARRAY_SIZE(ocx_dfs_ents));
  1183. if (ret != ARRAY_SIZE(ocx_dfs_ents)) {
  1184. dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
  1185. ret, ret >= 0 ? " created" : "");
  1186. }
  1187. }
  1188. pci_set_drvdata(pdev, edac_dev);
  1189. thunderx_ocx_clearstats(ocx);
  1190. for (i = 0; i < OCX_RX_LANES; i++) {
  1191. writeq(OCX_LNE_INT_ENA_ALL,
  1192. ocx->regs + OCX_LNE_INT_EN(i));
  1193. reg = readq(ocx->regs + OCX_LNE_INT(i));
  1194. writeq(reg, ocx->regs + OCX_LNE_INT(i));
  1195. }
  1196. for (i = 0; i < OCX_LINK_INTS; i++) {
  1197. reg = readq(ocx->regs + OCX_COM_LINKX_INT(i));
  1198. writeq(reg, ocx->regs + OCX_COM_LINKX_INT(i));
  1199. writeq(OCX_COM_LINKX_INT_ENA_ALL,
  1200. ocx->regs + OCX_COM_LINKX_INT_ENA_W1S(i));
  1201. }
  1202. reg = readq(ocx->regs + OCX_COM_INT);
  1203. writeq(reg, ocx->regs + OCX_COM_INT);
  1204. writeq(OCX_COM_INT_ENA_ALL, ocx->regs + OCX_COM_INT_ENA_W1S);
  1205. return 0;
  1206. err_free:
  1207. edac_device_free_ctl_info(edac_dev);
  1208. return ret;
  1209. }
/*
 * PCI remove callback: mask all OCX interrupt sources (common and
 * per-link) before unregistering, so no ISR touches freed state, then
 * tear down debugfs and the EDAC device.
 */
static void thunderx_ocx_remove(struct pci_dev *pdev)
{
	struct edac_device_ctl_info *edac_dev = pci_get_drvdata(pdev);
	struct thunderx_ocx *ocx = edac_dev->pvt_info;
	int i;

	writeq(OCX_COM_INT_ENA_ALL, ocx->regs + OCX_COM_INT_ENA_W1C);

	for (i = 0; i < OCX_INTS; i++) {
		writeq(OCX_COM_LINKX_INT_ENA_ALL,
		       ocx->regs + OCX_COM_LINKX_INT_ENA_W1C(i));
	}

	edac_debugfs_remove_recursive(ocx->debugfs);

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);
}
MODULE_DEVICE_TABLE(pci, thunderx_ocx_pci_tbl);

/* PCI driver glue for the OCX EDAC device. */
static struct pci_driver thunderx_ocx_driver = {
	.name     = "thunderx_ocx_edac",
	.probe    = thunderx_ocx_probe,
	.remove   = thunderx_ocx_remove,
	.id_table = thunderx_ocx_pci_tbl,
};
/*---------------------- L2C driver ---------------------------------*/

#define PCI_DEVICE_ID_THUNDER_L2C_TAD	0xa02e
#define PCI_DEVICE_ID_THUNDER_L2C_CBC	0xa02f
#define PCI_DEVICE_ID_THUNDER_L2C_MCI	0xa030

/* L2C TAD interrupt registers (W1C/W1S pairs). */
#define L2C_TAD_INT_W1C		0x40000
#define L2C_TAD_INT_W1S		0x40008

#define L2C_TAD_INT_ENA_W1C	0x40020
#define L2C_TAD_INT_ENA_W1S	0x40028

/* L2C_TAD_INT cause bits. */
#define L2C_TAD_INT_L2DDBE	BIT(1)
#define L2C_TAD_INT_SBFSBE	BIT(2)
#define L2C_TAD_INT_SBFDBE	BIT(3)
#define L2C_TAD_INT_FBFSBE	BIT(4)
#define L2C_TAD_INT_FBFDBE	BIT(5)
#define L2C_TAD_INT_TAGDBE	BIT(9)
#define L2C_TAD_INT_RDDISLMC	BIT(15)
#define L2C_TAD_INT_WRDISLMC	BIT(16)
#define L2C_TAD_INT_LFBTO	BIT(17)
#define L2C_TAD_INT_GSYNCTO	BIT(18)
#define L2C_TAD_INT_RTGSBE	BIT(32)
#define L2C_TAD_INT_RTGDBE	BIT(33)
#define L2C_TAD_INT_RDDISOCI	BIT(34)
#define L2C_TAD_INT_WRDISOCI	BIT(35)

/* All ECC-related causes (data/store/fill buffers). */
#define L2C_TAD_INT_ECC		(L2C_TAD_INT_L2DDBE | \
				 L2C_TAD_INT_SBFSBE | L2C_TAD_INT_SBFDBE | \
				 L2C_TAD_INT_FBFSBE | L2C_TAD_INT_FBFDBE)

/* Corrected-class causes (single-bit). */
#define L2C_TAD_INT_CE          (L2C_TAD_INT_SBFSBE | \
				 L2C_TAD_INT_FBFSBE)

/* Uncorrected-class causes (double-bit / disabled targets / timeouts). */
#define L2C_TAD_INT_UE          (L2C_TAD_INT_L2DDBE | \
				 L2C_TAD_INT_SBFDBE | \
				 L2C_TAD_INT_FBFDBE | \
				 L2C_TAD_INT_TAGDBE | \
				 L2C_TAD_INT_RTGDBE | \
				 L2C_TAD_INT_WRDISOCI | \
				 L2C_TAD_INT_RDDISOCI | \
				 L2C_TAD_INT_WRDISLMC | \
				 L2C_TAD_INT_RDDISLMC | \
				 L2C_TAD_INT_LFBTO    | \
				 L2C_TAD_INT_GSYNCTO)

/* Decode table for L2C_TAD_INT; zero sentinel terminated. */
static const struct error_descr l2_tad_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = L2C_TAD_INT_SBFSBE,
		.descr = "SBF single-bit error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = L2C_TAD_INT_FBFSBE,
		.descr = "FBF single-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_L2DDBE,
		.descr = "L2D double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_SBFDBE,
		.descr = "SBF double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_FBFDBE,
		.descr = "FBF double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_TAGDBE,
		.descr = "TAG double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_RTGDBE,
		.descr = "RTG double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_WRDISOCI,
		.descr = "Write to a disabled CCPI",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_RDDISOCI,
		.descr = "Read from a disabled CCPI",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_WRDISLMC,
		.descr = "Write to a disabled LMC",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_RDDISLMC,
		.descr = "Read from a disabled LMC",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_LFBTO,
		.descr = "LFB entry timeout",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_GSYNCTO,
		.descr = "Global sync CCPI timeout",
	},
	{0, 0, NULL},
};
/* Convenience groupings of TAD causes, used for ENA_ALL and the ISR. */
#define L2C_TAD_INT_TAG		(L2C_TAD_INT_TAGDBE)

#define L2C_TAD_INT_RTG		(L2C_TAD_INT_RTGDBE)

#define L2C_TAD_INT_DISLMC	(L2C_TAD_INT_WRDISLMC | L2C_TAD_INT_RDDISLMC)
#define L2C_TAD_INT_DISOCI	(L2C_TAD_INT_WRDISOCI | L2C_TAD_INT_RDDISOCI)

#define L2C_TAD_INT_ENA_ALL	(L2C_TAD_INT_ECC | L2C_TAD_INT_TAG | \
				 L2C_TAD_INT_RTG | \
				 L2C_TAD_INT_DISLMC | L2C_TAD_INT_DISOCI | \
				 L2C_TAD_INT_LFBTO)

/* TAD diagnostic/extended-error register offsets. */
#define L2C_TAD_TIMETWO		0x50000
#define L2C_TAD_TIMEOUT		0x50100
#define L2C_TAD_ERR		0x60000
#define L2C_TAD_TQD_ERR		0x60100
#define L2C_TAD_TTG_ERR		0x60200

/* L2C CBC interrupt registers and cause bits. */
#define L2C_CBC_INT_W1C		0x60000

#define L2C_CBC_INT_RSDSBE	BIT(0)
#define L2C_CBC_INT_RSDDBE	BIT(1)

#define L2C_CBC_INT_RSD		(L2C_CBC_INT_RSDSBE | L2C_CBC_INT_RSDDBE)

#define L2C_CBC_INT_MIBSBE	BIT(4)
#define L2C_CBC_INT_MIBDBE	BIT(5)

#define L2C_CBC_INT_MIB		(L2C_CBC_INT_MIBSBE | L2C_CBC_INT_MIBDBE)

#define L2C_CBC_INT_IORDDISOCI	BIT(6)
#define L2C_CBC_INT_IOWRDISOCI	BIT(7)

#define L2C_CBC_INT_IODISOCI	(L2C_CBC_INT_IORDDISOCI | \
				 L2C_CBC_INT_IOWRDISOCI)

#define L2C_CBC_INT_CE		(L2C_CBC_INT_RSDSBE | L2C_CBC_INT_MIBSBE)
#define L2C_CBC_INT_UE		(L2C_CBC_INT_RSDDBE | L2C_CBC_INT_MIBDBE)

/* Decode table for L2C_CBC_INT; zero sentinel terminated. */
static const struct error_descr l2_cbc_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = L2C_CBC_INT_RSDSBE,
		.descr = "RSD single-bit error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = L2C_CBC_INT_MIBSBE,
		.descr = "MIB single-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_CBC_INT_RSDDBE,
		.descr = "RSD double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_CBC_INT_MIBDBE,
		.descr = "MIB double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_CBC_INT_IORDDISOCI,
		.descr = "Read from a disabled CCPI",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_CBC_INT_IOWRDISOCI,
		.descr = "Write to a disabled CCPI",
	},
	{0, 0, NULL},
};

#define L2C_CBC_INT_W1S		0x60008
#define L2C_CBC_INT_ENA_W1C	0x60020

#define L2C_CBC_INT_ENA_ALL	 (L2C_CBC_INT_RSD | L2C_CBC_INT_MIB | \
				  L2C_CBC_INT_IODISOCI)

#define L2C_CBC_INT_ENA_W1S	0x60028

/* CBC extended-error registers. */
#define L2C_CBC_IODISOCIERR	0x80008
#define L2C_CBC_IOCERR		0x80010
#define L2C_CBC_RSDERR		0x80018
#define L2C_CBC_MIBERR		0x80020

/* L2C MCI interrupt registers and cause bits. */
#define L2C_MCI_INT_W1C		0x0

#define L2C_MCI_INT_VBFSBE	BIT(0)
#define L2C_MCI_INT_VBFDBE	BIT(1)

/* Decode table for L2C_MCI_INT; zero sentinel terminated. */
static const struct error_descr l2_mci_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = L2C_MCI_INT_VBFSBE,
		.descr = "VBF single-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_MCI_INT_VBFDBE,
		.descr = "VBF double-bit error",
	},
	{0, 0, NULL},
};

#define L2C_MCI_INT_W1S		0x8
#define L2C_MCI_INT_ENA_W1C	0x20

#define L2C_MCI_INT_ENA_ALL	(L2C_MCI_INT_VBFSBE | L2C_MCI_INT_VBFDBE)

#define L2C_MCI_INT_ENA_W1S	0x28

#define L2C_MCI_ERR		0x10000

/* Scratch buffer sizes for formatting L2C error reports. */
#define L2C_MESSAGE_SIZE	SZ_1K
#define L2C_OTHER_SIZE		(50 * ARRAY_SIZE(l2_tad_errors))

/* Snapshot of one L2C interrupt plus its extended-error register. */
struct l2c_err_ctx {
	char *reg_ext_name;	/* name of the extended register captured */
	u64  reg_int;		/* interrupt cause register */
	u64  reg_ext;		/* extended error detail register */
};

/* Driver state shared by the TAD/CBC/MCI L2C sub-devices. */
struct thunderx_l2c {
	void __iomem *regs;
	struct pci_dev *pdev;
	struct edac_device_ctl_info *edac_dev;

	struct dentry *debugfs;

	int index;

	struct msix_entry msix_ent;

	/* Hard-IRQ producer / threaded-IRQ consumer ring. */
	struct l2c_err_ctx err_ctx[RING_ENTRIES];
	unsigned long ring_head;
	unsigned long ring_tail;
};
/*
 * Hard-IRQ handler for the L2C TAD sub-device (threaded).  Snapshots
 * the cause register plus the matching extended-error register into
 * the ring, acknowledges (W1C), and defers reporting to the thread.
 */
static irqreturn_t thunderx_l2c_tad_isr(int irq, void *irq_id)
{
	struct msix_entry *msix = irq_id;
	struct thunderx_l2c *tad = container_of(msix, struct thunderx_l2c,
						msix_ent);

	unsigned long head = ring_pos(tad->ring_head, ARRAY_SIZE(tad->err_ctx));
	struct l2c_err_ctx *ctx = &tad->err_ctx[head];

	ctx->reg_int = readq(tad->regs + L2C_TAD_INT_W1C);

	/* Pick the extended register that matches the highest-priority cause. */
	if (ctx->reg_int & L2C_TAD_INT_ECC) {
		ctx->reg_ext_name = "TQD_ERR";
		ctx->reg_ext = readq(tad->regs + L2C_TAD_TQD_ERR);
	} else if (ctx->reg_int & L2C_TAD_INT_TAG) {
		ctx->reg_ext_name = "TTG_ERR";
		ctx->reg_ext = readq(tad->regs + L2C_TAD_TTG_ERR);
	} else if (ctx->reg_int & L2C_TAD_INT_LFBTO) {
		ctx->reg_ext_name = "TIMEOUT";
		ctx->reg_ext = readq(tad->regs + L2C_TAD_TIMEOUT);
	} else if (ctx->reg_int & L2C_TAD_INT_DISOCI) {
		ctx->reg_ext_name = "ERR";
		ctx->reg_ext = readq(tad->regs + L2C_TAD_ERR);
	}

	/* Acknowledge exactly the causes we captured (W1C). */
	writeq(ctx->reg_int, tad->regs + L2C_TAD_INT_W1C);

	tad->ring_head++;

	return IRQ_WAKE_THREAD;
}
/*
 * Hard-IRQ handler for the L2C CBC sub-device (threaded).  Same
 * snapshot/ack/defer pattern as the TAD handler, with CBC-specific
 * extended-error registers.
 */
static irqreturn_t thunderx_l2c_cbc_isr(int irq, void *irq_id)
{
	struct msix_entry *msix = irq_id;
	struct thunderx_l2c *cbc = container_of(msix, struct thunderx_l2c,
						msix_ent);

	unsigned long head = ring_pos(cbc->ring_head, ARRAY_SIZE(cbc->err_ctx));
	struct l2c_err_ctx *ctx = &cbc->err_ctx[head];

	ctx->reg_int = readq(cbc->regs + L2C_CBC_INT_W1C);

	if (ctx->reg_int & L2C_CBC_INT_RSD) {
		ctx->reg_ext_name = "RSDERR";
		ctx->reg_ext = readq(cbc->regs + L2C_CBC_RSDERR);
	} else if (ctx->reg_int & L2C_CBC_INT_MIB) {
		ctx->reg_ext_name = "MIBERR";
		ctx->reg_ext = readq(cbc->regs + L2C_CBC_MIBERR);
	} else if (ctx->reg_int & L2C_CBC_INT_IODISOCI) {
		ctx->reg_ext_name = "IODISOCIERR";
		ctx->reg_ext = readq(cbc->regs + L2C_CBC_IODISOCIERR);
	}

	/* Acknowledge exactly the causes we captured (W1C). */
	writeq(ctx->reg_int, cbc->regs + L2C_CBC_INT_W1C);

	cbc->ring_head++;

	return IRQ_WAKE_THREAD;
}
/*
 * Hard-IRQ handler for the L2C MCI sub-device (threaded).  MCI has a
 * single extended-error register, so no cause-based selection is
 * needed.
 */
static irqreturn_t thunderx_l2c_mci_isr(int irq, void *irq_id)
{
	struct msix_entry *msix = irq_id;
	struct thunderx_l2c *mci = container_of(msix, struct thunderx_l2c,
						msix_ent);

	unsigned long head = ring_pos(mci->ring_head, ARRAY_SIZE(mci->err_ctx));
	struct l2c_err_ctx *ctx = &mci->err_ctx[head];

	ctx->reg_int = readq(mci->regs + L2C_MCI_INT_W1C);
	ctx->reg_ext = readq(mci->regs + L2C_MCI_ERR);

	/* Acknowledge exactly the causes we captured (W1C). */
	writeq(ctx->reg_int, mci->regs + L2C_MCI_INT_W1C);

	ctx->reg_ext_name = "ERR";
	mci->ring_head++;

	return IRQ_WAKE_THREAD;
}
  1505. static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
  1506. {
  1507. struct msix_entry *msix = irq_id;
  1508. struct thunderx_l2c *l2c = container_of(msix, struct thunderx_l2c,
  1509. msix_ent);
  1510. unsigned long tail = ring_pos(l2c->ring_tail, ARRAY_SIZE(l2c->err_ctx));
  1511. struct l2c_err_ctx *ctx = &l2c->err_ctx[tail];
  1512. irqreturn_t ret = IRQ_NONE;
  1513. u64 mask_ue, mask_ce;
  1514. const struct error_descr *l2_errors;
  1515. char *reg_int_name;
  1516. char *msg;
  1517. char *other;
  1518. msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
  1519. other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
  1520. if (!msg || !other)
  1521. goto err_free;
  1522. switch (l2c->pdev->device) {
  1523. case PCI_DEVICE_ID_THUNDER_L2C_TAD:
  1524. reg_int_name = "L2C_TAD_INT";
  1525. mask_ue = L2C_TAD_INT_UE;
  1526. mask_ce = L2C_TAD_INT_CE;
  1527. l2_errors = l2_tad_errors;
  1528. break;
  1529. case PCI_DEVICE_ID_THUNDER_L2C_CBC:
  1530. reg_int_name = "L2C_CBC_INT";
  1531. mask_ue = L2C_CBC_INT_UE;
  1532. mask_ce = L2C_CBC_INT_CE;
  1533. l2_errors = l2_cbc_errors;
  1534. break;
  1535. case PCI_DEVICE_ID_THUNDER_L2C_MCI:
  1536. reg_int_name = "L2C_MCI_INT";
  1537. mask_ue = L2C_MCI_INT_VBFDBE;
  1538. mask_ce = L2C_MCI_INT_VBFSBE;
  1539. l2_errors = l2_mci_errors;
  1540. break;
  1541. default:
  1542. dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n",
  1543. l2c->pdev->device);
  1544. return IRQ_NONE;
  1545. }
  1546. while (CIRC_CNT(l2c->ring_head, l2c->ring_tail,
  1547. ARRAY_SIZE(l2c->err_ctx))) {
  1548. snprintf(msg, L2C_MESSAGE_SIZE,
  1549. "%s: %s: %016llx, %s: %016llx",
  1550. l2c->edac_dev->ctl_name, reg_int_name, ctx->reg_int,
  1551. ctx->reg_ext_name, ctx->reg_ext);
  1552. decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
  1553. strncat(msg, other, L2C_MESSAGE_SIZE);
  1554. if (ctx->reg_int & mask_ue)
  1555. edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
  1556. else if (ctx->reg_int & mask_ce)
  1557. edac_device_handle_ce(l2c->edac_dev, 0, 0, msg);
  1558. l2c->ring_tail++;
  1559. }
  1560. return IRQ_HANDLED;
  1561. err_free:
  1562. kfree(other);
  1563. kfree(msg);
  1564. return ret;
  1565. }
/* debugfs error-injection attributes, one array per L2C sub-device. */
#define L2C_DEBUGFS_ATTR(_name, _reg)	DEBUGFS_REG_ATTR(l2c, _name, _reg)

L2C_DEBUGFS_ATTR(tad_int, L2C_TAD_INT_W1S);

struct debugfs_entry *l2c_tad_dfs_ents[] = {
	&debugfs_tad_int,
};

L2C_DEBUGFS_ATTR(cbc_int, L2C_CBC_INT_W1S);

struct debugfs_entry *l2c_cbc_dfs_ents[] = {
	&debugfs_cbc_int,
};

L2C_DEBUGFS_ATTR(mci_int, L2C_MCI_INT_W1S);

struct debugfs_entry *l2c_mci_dfs_ents[] = {
	&debugfs_mci_int,
};

/* PCI IDs for the three L2C sub-devices handled by this driver. */
static const struct pci_device_id thunderx_l2c_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_TAD), },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_CBC), },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_MCI), },
	{ 0, },
};
  1585. static int thunderx_l2c_probe(struct pci_dev *pdev,
  1586. const struct pci_device_id *id)
  1587. {
  1588. struct thunderx_l2c *l2c;
  1589. struct edac_device_ctl_info *edac_dev;
  1590. struct debugfs_entry **l2c_devattr;
  1591. size_t dfs_entries;
  1592. irqreturn_t (*thunderx_l2c_isr)(int, void *) = NULL;
  1593. char name[32];
  1594. const char *fmt;
  1595. u64 reg_en_offs, reg_en_mask;
  1596. int idx;
  1597. int ret;
  1598. ret = pcim_enable_device(pdev);
  1599. if (ret) {
  1600. dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
  1601. return ret;
  1602. }
  1603. ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_l2c");
  1604. if (ret) {
  1605. dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
  1606. return ret;
  1607. }
  1608. switch (pdev->device) {
  1609. case PCI_DEVICE_ID_THUNDER_L2C_TAD:
  1610. thunderx_l2c_isr = thunderx_l2c_tad_isr;
  1611. l2c_devattr = l2c_tad_dfs_ents;
  1612. dfs_entries = ARRAY_SIZE(l2c_tad_dfs_ents);
  1613. fmt = "L2C-TAD%d";
  1614. reg_en_offs = L2C_TAD_INT_ENA_W1S;
  1615. reg_en_mask = L2C_TAD_INT_ENA_ALL;
  1616. break;
  1617. case PCI_DEVICE_ID_THUNDER_L2C_CBC:
  1618. thunderx_l2c_isr = thunderx_l2c_cbc_isr;
  1619. l2c_devattr = l2c_cbc_dfs_ents;
  1620. dfs_entries = ARRAY_SIZE(l2c_cbc_dfs_ents);
  1621. fmt = "L2C-CBC%d";
  1622. reg_en_offs = L2C_CBC_INT_ENA_W1S;
  1623. reg_en_mask = L2C_CBC_INT_ENA_ALL;
  1624. break;
  1625. case PCI_DEVICE_ID_THUNDER_L2C_MCI:
  1626. thunderx_l2c_isr = thunderx_l2c_mci_isr;
  1627. l2c_devattr = l2c_mci_dfs_ents;
  1628. dfs_entries = ARRAY_SIZE(l2c_mci_dfs_ents);
  1629. fmt = "L2C-MCI%d";
  1630. reg_en_offs = L2C_MCI_INT_ENA_W1S;
  1631. reg_en_mask = L2C_MCI_INT_ENA_ALL;
  1632. break;
  1633. default:
  1634. //Should never ever get here
  1635. dev_err(&pdev->dev, "Unsupported PCI device: %04x\n",
  1636. pdev->device);
  1637. return -EINVAL;
  1638. }
  1639. idx = edac_device_alloc_index();
  1640. snprintf(name, sizeof(name), fmt, idx);
  1641. edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_l2c),
  1642. name, 1, "L2C", 1, 0,
  1643. NULL, 0, idx);
  1644. if (!edac_dev) {
  1645. dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
  1646. return -ENOMEM;
  1647. }
  1648. l2c = edac_dev->pvt_info;
  1649. l2c->edac_dev = edac_dev;
  1650. l2c->regs = pcim_iomap_table(pdev)[0];
  1651. if (!l2c->regs) {
  1652. dev_err(&pdev->dev, "Cannot map PCI resources\n");
  1653. ret = -ENODEV;
  1654. goto err_free;
  1655. }
  1656. l2c->pdev = pdev;
  1657. l2c->ring_head = 0;
  1658. l2c->ring_tail = 0;
  1659. l2c->msix_ent.entry = 0;
  1660. l2c->msix_ent.vector = 0;
  1661. ret = pci_enable_msix_exact(pdev, &l2c->msix_ent, 1);
  1662. if (ret) {
  1663. dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
  1664. goto err_free;
  1665. }
  1666. ret = devm_request_threaded_irq(&pdev->dev, l2c->msix_ent.vector,
  1667. thunderx_l2c_isr,
  1668. thunderx_l2c_threaded_isr,
  1669. 0, "[EDAC] ThunderX L2C",
  1670. &l2c->msix_ent);
  1671. if (ret)
  1672. goto err_free;
  1673. edac_dev->dev = &pdev->dev;
  1674. edac_dev->dev_name = dev_name(&pdev->dev);
  1675. edac_dev->mod_name = "thunderx-l2c";
  1676. edac_dev->ctl_name = "thunderx-l2c";
  1677. ret = edac_device_add_device(edac_dev);
  1678. if (ret) {
  1679. dev_err(&pdev->dev, "Cannot add EDAC device: %d\n", ret);
  1680. goto err_free;
  1681. }
  1682. if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
  1683. l2c->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
  1684. thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr,
  1685. l2c, dfs_entries);
  1686. if (ret != dfs_entries) {
  1687. dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
  1688. ret, ret >= 0 ? " created" : "");
  1689. }
  1690. }
  1691. pci_set_drvdata(pdev, edac_dev);
  1692. writeq(reg_en_mask, l2c->regs + reg_en_offs);
  1693. return 0;
  1694. err_free:
  1695. edac_device_free_ctl_info(edac_dev);
  1696. return ret;
  1697. }
  1698. static void thunderx_l2c_remove(struct pci_dev *pdev)
  1699. {
  1700. struct edac_device_ctl_info *edac_dev = pci_get_drvdata(pdev);
  1701. struct thunderx_l2c *l2c = edac_dev->pvt_info;
  1702. switch (pdev->device) {
  1703. case PCI_DEVICE_ID_THUNDER_L2C_TAD:
  1704. writeq(L2C_TAD_INT_ENA_ALL, l2c->regs + L2C_TAD_INT_ENA_W1C);
  1705. break;
  1706. case PCI_DEVICE_ID_THUNDER_L2C_CBC:
  1707. writeq(L2C_CBC_INT_ENA_ALL, l2c->regs + L2C_CBC_INT_ENA_W1C);
  1708. break;
  1709. case PCI_DEVICE_ID_THUNDER_L2C_MCI:
  1710. writeq(L2C_MCI_INT_ENA_ALL, l2c->regs + L2C_MCI_INT_ENA_W1C);
  1711. break;
  1712. }
  1713. edac_debugfs_remove_recursive(l2c->debugfs);
  1714. edac_device_del_device(&pdev->dev);
  1715. edac_device_free_ctl_info(edac_dev);
  1716. }
MODULE_DEVICE_TABLE(pci, thunderx_l2c_pci_tbl);

/* PCI driver binding for the L2C TAD/CBC/MCI EDAC devices */
static struct pci_driver thunderx_l2c_driver = {
	.name = "thunderx_l2c_edac",
	.probe = thunderx_l2c_probe,
	.remove = thunderx_l2c_remove,
	.id_table = thunderx_l2c_pci_tbl,
};
  1724. static int __init thunderx_edac_init(void)
  1725. {
  1726. int rc = 0;
  1727. rc = pci_register_driver(&thunderx_lmc_driver);
  1728. if (rc)
  1729. return rc;
  1730. rc = pci_register_driver(&thunderx_ocx_driver);
  1731. if (rc)
  1732. goto err_lmc;
  1733. rc = pci_register_driver(&thunderx_l2c_driver);
  1734. if (rc)
  1735. goto err_ocx;
  1736. return rc;
  1737. err_ocx:
  1738. pci_unregister_driver(&thunderx_ocx_driver);
  1739. err_lmc:
  1740. pci_unregister_driver(&thunderx_lmc_driver);
  1741. return rc;
  1742. }
/* Unregister the three sub-drivers, in reverse order of registration. */
static void __exit thunderx_edac_exit(void)
{
	pci_unregister_driver(&thunderx_l2c_driver);
	pci_unregister_driver(&thunderx_ocx_driver);
	pci_unregister_driver(&thunderx_lmc_driver);
}

module_init(thunderx_edac_init);
module_exit(thunderx_edac_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Cavium, Inc.");
MODULE_DESCRIPTION("EDAC Driver for Cavium ThunderX");