  1. /*
  2. * Cavium ThunderX memory controller kernel module
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright Cavium, Inc. (C) 2015-2017. All rights reserved.
  9. *
  10. */
  11. #include <linux/module.h>
  12. #include <linux/pci.h>
  13. #include <linux/edac.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/string.h>
  16. #include <linux/stop_machine.h>
  17. #include <linux/delay.h>
  18. #include <linux/sizes.h>
  19. #include <linux/atomic.h>
  20. #include <linux/bitfield.h>
  21. #include <linux/circ_buf.h>
  22. #include <asm/page.h>
  23. #include "edac_module.h"
#define phys_to_pfn(phys)	(PFN_DOWN(phys))

/* Bits [45:44] of a physical address select the ThunderX node. */
#define THUNDERX_NODE		GENMASK(45, 44)

/* Error severity classes used by the decode tables below. */
enum {
	ERR_CORRECTED	= 1,
	ERR_UNCORRECTED	= 2,
	ERR_UNKNOWN	= 3,
};

#define MAX_SYNDROME_REGS 4

/* Raw syndrome-register snapshot captured at error time. */
struct error_syndrome {
	u64 reg[MAX_SYNDROME_REGS];
};

/* One decodable error condition: severity, status-register mask, text. */
struct error_descr {
	int type;	/* ERR_CORRECTED / ERR_UNCORRECTED / ERR_UNKNOWN */
	u64 mask;	/* bit(s) in the interrupt/status register */
	char *descr;	/* human-readable description */
};
  40. static void decode_register(char *str, size_t size,
  41. const struct error_descr *descr,
  42. const uint64_t reg)
  43. {
  44. int ret = 0;
  45. while (descr->type && descr->mask && descr->descr) {
  46. if (reg & descr->mask) {
  47. ret = snprintf(str, size, "\n\t%s, %s",
  48. descr->type == ERR_CORRECTED ?
  49. "Corrected" : "Uncorrected",
  50. descr->descr);
  51. str += ret;
  52. size -= ret;
  53. }
  54. descr++;
  55. }
  56. }
  57. static unsigned long get_bits(unsigned long data, int pos, int width)
  58. {
  59. return (data >> pos) & ((1 << width) - 1);
  60. }
/* L2C control CSR base; DISIDXALIAS disables L2 cache index aliasing. */
#define L2C_CTL			0x87E080800000
#define L2C_CTL_DISIDXALIAS	BIT(0)

#define PCI_DEVICE_ID_THUNDER_LMC 0xa022

/* LMC failing-address register and its bit fields. */
#define LMC_FADR		0x20
#define LMC_FADR_FDIMM(x)	((x >> 37) & 0x1)
#define LMC_FADR_FBUNK(x)	((x >> 36) & 0x1)
#define LMC_FADR_FBANK(x)	((x >> 32) & 0xf)
#define LMC_FADR_FROW(x)	((x >> 14) & 0xffff)
#define LMC_FADR_FCOL(x)	((x >> 0) & 0x1fff)

#define LMC_NXM_FADR		0x28	/* non-existent-memory failing address */
#define LMC_ECC_SYND		0x38	/* ECC syndrome */

#define LMC_ECC_PARITY_TEST	0x108	/* error-injection control */

/* Interrupt set/enable registers (W1S = write-1-sets, W1C = write-1-clears). */
#define LMC_INT_W1S		0x150
#define LMC_INT_ENA_W1C		0x158
#define LMC_INT_ENA_W1S		0x160

#define LMC_CONFIG		0x188
#define LMC_CONFIG_BG2		BIT(62)
#define LMC_CONFIG_RANK_ENA	BIT(42)
#define LMC_CONFIG_PBANK_LSB(x)	(((x) >> 5) & 0xF)
#define LMC_CONFIG_ROW_LSB(x)	(((x) >> 2) & 0x7)

#define LMC_CONTROL		0x190
#define LMC_CONTROL_XOR_BANK	BIT(16)

/* Interrupt status register and its cause bits. */
#define LMC_INT			0x1F0
#define LMC_INT_DDR_ERR		BIT(11)
#define LMC_INT_DED_ERR		(0xFUL << 5)
#define LMC_INT_SEC_ERR		(0xFUL << 1)
#define LMC_INT_NXM_WR_MASK	BIT(0)

#define LMC_DDR_PLL_CTL		0x258
#define LMC_DDR_PLL_CTL_DDR4	BIT(29)

#define LMC_FADR_SCRAMBLED	0x330

/* Cause masks split into uncorrectable vs. correctable classes. */
#define LMC_INT_UE		(LMC_INT_DDR_ERR | LMC_INT_DED_ERR | \
				 LMC_INT_NXM_WR_MASK)

#define LMC_INT_CE		(LMC_INT_SEC_ERR)
/*
 * Decode table for LMC_INT cause bits; terminated by the all-zero entry
 * (decode_register() stops at it).
 */
static const struct error_descr lmc_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = LMC_INT_SEC_ERR,
		.descr = "Single-bit ECC error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = LMC_INT_DDR_ERR,
		.descr = "DDR chip error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = LMC_INT_DED_ERR,
		.descr = "Double-bit ECC error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = LMC_INT_NXM_WR_MASK,
		.descr = "Non-existent memory write",
	},
	{0, 0, NULL},
};
  117. #define LMC_INT_EN_DDR_ERROR_ALERT_ENA BIT(5)
  118. #define LMC_INT_EN_DLCRAM_DED_ERR BIT(4)
  119. #define LMC_INT_EN_DLCRAM_SEC_ERR BIT(3)
  120. #define LMC_INT_INTR_DED_ENA BIT(2)
  121. #define LMC_INT_INTR_SEC_ENA BIT(1)
  122. #define LMC_INT_INTR_NXM_WR_ENA BIT(0)
  123. #define LMC_INT_ENA_ALL GENMASK(5, 0)
  124. #define LMC_DDR_PLL_CTL 0x258
  125. #define LMC_DDR_PLL_CTL_DDR4 BIT(29)
  126. #define LMC_CONTROL 0x190
  127. #define LMC_CONTROL_RDIMM BIT(0)
  128. #define LMC_SCRAM_FADR 0x330
  129. #define LMC_CHAR_MASK0 0x228
  130. #define LMC_CHAR_MASK2 0x238
  131. #define RING_ENTRIES 8
/* One debugfs attribute: node name, permission bits and file operations. */
struct debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations fops;
};
/* Snapshot of LMC error registers captured in the hard-IRQ handler. */
struct lmc_err_ctx {
	u64 reg_int;		/* LMC_INT at interrupt time */
	u64 reg_fadr;		/* LMC_FADR */
	u64 reg_nxm_fadr;	/* LMC_NXM_FADR */
	u64 reg_scram_fadr;	/* LMC_SCRAM_FADR */
	u64 reg_ecc_synd;	/* LMC_ECC_SYND */
};
/* Per-controller driver state, stored as the MCI private data. */
struct thunderx_lmc {
	void __iomem *regs;		/* mapped LMC CSR BAR */
	struct pci_dev *pdev;
	struct msix_entry msix_ent;

	atomic_t ecc_int;		/* set by the ISR when an error fires */

	u64 mask0;			/* debugfs-set LMC_CHAR_MASK0 value */
	u64 mask2;			/* debugfs-set LMC_CHAR_MASK2 value */
	u64 parity_test;		/* debugfs-set LMC_ECC_PARITY_TEST value */
	u64 node;			/* ThunderX node this controller lives on */

	/* Physical-address decode geometry, computed at probe time. */
	int xbits;
	int bank_width;
	int pbank_lsb;
	int dimm_lsb;
	int rank_lsb;
	int bank_lsb;
	int row_lsb;
	int col_hi_lsb;

	int xor_bank;			/* LMC_CONTROL.XOR_BANK set */
	int l2c_alias;			/* L2C index aliasing enabled */

	struct page *mem;		/* page used for ECC error injection */

	/*
	 * Error ring: the hard-IRQ handler fills entries and bumps
	 * ring_head; the threaded handler drains from ring_tail.
	 */
	struct lmc_err_ctx err_ctx[RING_ENTRIES];
	unsigned long ring_head;
	unsigned long ring_tail;
};
  168. #define ring_pos(pos, size) ((pos) & (size - 1))
/*
 * Define a struct debugfs_entry named debugfs_<_name> with the given
 * permissions and simple_open-based read/write file operations.
 */
#define DEBUGFS_STRUCT(_name, _mode, _write, _read)			    \
static struct debugfs_entry debugfs_##_name = {				    \
	.name = __stringify(_name),					    \
	.mode = VERIFY_OCTAL_PERMISSIONS(_mode),			    \
	.fops = {							    \
		.open = simple_open,					    \
		.write = _write,					    \
		.read = _read,						    \
		.llseek = generic_file_llseek,				    \
	},								    \
}
  180. #define DEBUGFS_FIELD_ATTR(_type, _field) \
  181. static ssize_t thunderx_##_type##_##_field##_read(struct file *file, \
  182. char __user *data, \
  183. size_t count, loff_t *ppos) \
  184. { \
  185. struct thunderx_##_type *pdata = file->private_data; \
  186. char buf[20]; \
  187. \
  188. snprintf(buf, count, "0x%016llx", pdata->_field); \
  189. return simple_read_from_buffer(data, count, ppos, \
  190. buf, sizeof(buf)); \
  191. } \
  192. \
  193. static ssize_t thunderx_##_type##_##_field##_write(struct file *file, \
  194. const char __user *data, \
  195. size_t count, loff_t *ppos) \
  196. { \
  197. struct thunderx_##_type *pdata = file->private_data; \
  198. int res; \
  199. \
  200. res = kstrtoull_from_user(data, count, 0, &pdata->_field); \
  201. \
  202. return res ? res : count; \
  203. } \
  204. \
  205. DEBUGFS_STRUCT(_field, 0600, \
  206. thunderx_##_type##_##_field##_write, \
  207. thunderx_##_type##_##_field##_read) \
  208. #define DEBUGFS_REG_ATTR(_type, _name, _reg) \
  209. static ssize_t thunderx_##_type##_##_name##_read(struct file *file, \
  210. char __user *data, \
  211. size_t count, loff_t *ppos) \
  212. { \
  213. struct thunderx_##_type *pdata = file->private_data; \
  214. char buf[20]; \
  215. \
  216. sprintf(buf, "0x%016llx", readq(pdata->regs + _reg)); \
  217. return simple_read_from_buffer(data, count, ppos, \
  218. buf, sizeof(buf)); \
  219. } \
  220. \
  221. static ssize_t thunderx_##_type##_##_name##_write(struct file *file, \
  222. const char __user *data, \
  223. size_t count, loff_t *ppos) \
  224. { \
  225. struct thunderx_##_type *pdata = file->private_data; \
  226. u64 val; \
  227. int res; \
  228. \
  229. res = kstrtoull_from_user(data, count, 0, &val); \
  230. \
  231. if (!res) { \
  232. writeq(val, pdata->regs + _reg); \
  233. res = count; \
  234. } \
  235. \
  236. return res; \
  237. } \
  238. \
  239. DEBUGFS_STRUCT(_name, 0600, \
  240. thunderx_##_type##_##_name##_write, \
  241. thunderx_##_type##_##_name##_read)
/* Shorthand: debugfs attribute for a field of struct thunderx_lmc. */
#define LMC_DEBUGFS_ENT(_field)	DEBUGFS_FIELD_ATTR(lmc, _field)
  243. /*
  244. * To get an ECC error injected, the following steps are needed:
  245. * - Setup the ECC injection by writing the appropriate parameters:
  246. * echo <bit mask value> > /sys/kernel/debug/<device number>/ecc_mask0
  247. * echo <bit mask value> > /sys/kernel/debug/<device number>/ecc_mask2
  248. * echo 0x802 > /sys/kernel/debug/<device number>/ecc_parity_test
  249. * - Do the actual injection:
  250. * echo 1 > /sys/kernel/debug/<device number>/inject_ecc
  251. */
  252. static ssize_t thunderx_lmc_inject_int_write(struct file *file,
  253. const char __user *data,
  254. size_t count, loff_t *ppos)
  255. {
  256. struct thunderx_lmc *lmc = file->private_data;
  257. u64 val;
  258. int res;
  259. res = kstrtoull_from_user(data, count, 0, &val);
  260. if (!res) {
  261. /* Trigger the interrupt */
  262. writeq(val, lmc->regs + LMC_INT_W1S);
  263. res = count;
  264. }
  265. return res;
  266. }
  267. static ssize_t thunderx_lmc_int_read(struct file *file,
  268. char __user *data,
  269. size_t count, loff_t *ppos)
  270. {
  271. struct thunderx_lmc *lmc = file->private_data;
  272. char buf[20];
  273. u64 lmc_int = readq(lmc->regs + LMC_INT);
  274. snprintf(buf, sizeof(buf), "0x%016llx", lmc_int);
  275. return simple_read_from_buffer(data, count, ppos, buf, sizeof(buf));
  276. }
#define TEST_PATTERN 0xa5

/*
 * Runs under stop_machine(): program the LMC error-injection registers
 * and then push a page of test-pattern data through L1 and L2 out to
 * DRAM so the configured ECC corruption takes effect.
 */
static int inject_ecc_fn(void *arg)
{
	struct thunderx_lmc *lmc = arg;
	uintptr_t addr, phys;
	unsigned int cline_size = cache_line_size();
	const unsigned int lines = PAGE_SIZE / cline_size;
	unsigned int i, cl_idx;

	addr = (uintptr_t)page_address(lmc->mem);
	phys = (uintptr_t)page_to_phys(lmc->mem);

	/* Encode the target cache-line index into bits [10:8] of the
	 * parity-test value before programming it. */
	cl_idx = (phys & 0x7f) >> 4;
	lmc->parity_test &= ~(7ULL << 8);
	lmc->parity_test |= (cl_idx << 8);

	writeq(lmc->mask0, lmc->regs + LMC_CHAR_MASK0);
	writeq(lmc->mask2, lmc->regs + LMC_CHAR_MASK2);
	writeq(lmc->parity_test, lmc->regs + LMC_ECC_PARITY_TEST);

	/* Read back the injection registers before touching memory. */
	readq(lmc->regs + LMC_CHAR_MASK0);
	readq(lmc->regs + LMC_CHAR_MASK2);
	readq(lmc->regs + LMC_ECC_PARITY_TEST);

	for (i = 0; i < lines; i++) {
		/*
		 * NOTE(review): the pattern is always written to the first
		 * cache line of the page, while the flush below targets
		 * line i — confirm this is the intended access pattern.
		 */
		memset((void *)addr, TEST_PATTERN, cline_size);
		barrier();

		/*
		 * Flush L1 cachelines to the PoC (L2).
		 * This will cause cacheline eviction to the L2.
		 */
		asm volatile("dc civac, %0\n"
			     "dsb sy\n"
			     : : "r"(addr + i * cline_size));
	}

	for (i = 0; i < lines; i++) {
		/*
		 * Flush L2 cachelines to the DRAM.
		 * This will cause cacheline eviction to the DRAM
		 * and ECC corruption according to the masks set.
		 */
		__asm__ volatile("sys #0,c11,C1,#2, %0\n"
				 : : "r"(phys + i * cline_size));
	}

	for (i = 0; i < lines; i++) {
		/*
		 * Invalidate L2 cachelines.
		 * The subsequent load will cause cacheline fetch
		 * from the DRAM and an error interrupt
		 */
		__asm__ volatile("sys #0,c11,C1,#1, %0"
				 : : "r"(phys + i * cline_size));
	}

	for (i = 0; i < lines; i++) {
		/*
		 * Invalidate L1 cachelines.
		 * The subsequent load will cause cacheline fetch
		 * from the L2 and/or DRAM
		 */
		asm volatile("dc ivac, %0\n"
			     "dsb sy\n"
			     : : "r"(addr + i * cline_size));
	}

	return 0;
}
  337. static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
  338. const char __user *data,
  339. size_t count, loff_t *ppos)
  340. {
  341. struct thunderx_lmc *lmc = file->private_data;
  342. unsigned int cline_size = cache_line_size();
  343. u8 tmp[cline_size];
  344. void __iomem *addr;
  345. unsigned int offs, timeout = 100000;
  346. atomic_set(&lmc->ecc_int, 0);
  347. lmc->mem = alloc_pages_node(lmc->node, GFP_KERNEL, 0);
  348. if (!lmc->mem)
  349. return -ENOMEM;
  350. addr = page_address(lmc->mem);
  351. while (!atomic_read(&lmc->ecc_int) && timeout--) {
  352. stop_machine(inject_ecc_fn, lmc, NULL);
  353. for (offs = 0; offs < PAGE_SIZE; offs += sizeof(tmp)) {
  354. /*
  355. * Do a load from the previously rigged location
  356. * This should generate an error interrupt.
  357. */
  358. memcpy(tmp, addr + offs, cline_size);
  359. asm volatile("dsb ld\n");
  360. }
  361. }
  362. __free_pages(lmc->mem, 0);
  363. return count;
  364. }
  365. LMC_DEBUGFS_ENT(mask0);
  366. LMC_DEBUGFS_ENT(mask2);
  367. LMC_DEBUGFS_ENT(parity_test);
  368. DEBUGFS_STRUCT(inject_int, 0200, thunderx_lmc_inject_int_write, NULL);
  369. DEBUGFS_STRUCT(inject_ecc, 0200, thunderx_lmc_inject_ecc_write, NULL);
  370. DEBUGFS_STRUCT(int_w1c, 0400, NULL, thunderx_lmc_int_read);
  371. struct debugfs_entry *lmc_dfs_ents[] = {
  372. &debugfs_mask0,
  373. &debugfs_mask2,
  374. &debugfs_parity_test,
  375. &debugfs_inject_ecc,
  376. &debugfs_inject_int,
  377. &debugfs_int_w1c,
  378. };
  379. static int thunderx_create_debugfs_nodes(struct dentry *parent,
  380. struct debugfs_entry *attrs[],
  381. void *data,
  382. size_t num)
  383. {
  384. int i;
  385. struct dentry *ent;
  386. if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
  387. return 0;
  388. if (!parent)
  389. return -ENOENT;
  390. for (i = 0; i < num; i++) {
  391. ent = edac_debugfs_create_file(attrs[i]->name, attrs[i]->mode,
  392. parent, data, &attrs[i]->fops);
  393. if (!ent)
  394. break;
  395. }
  396. return i;
  397. }
/*
 * Reconstruct a system physical address from a captured LMC failing
 * address (FADR), using the decode geometry computed at probe time.
 */
static phys_addr_t thunderx_faddr_to_phys(u64 faddr, struct thunderx_lmc *lmc)
{
	phys_addr_t addr = 0;
	int bank, xbits;

	addr |= lmc->node << 40;
	addr |= LMC_FADR_FDIMM(faddr) << lmc->dimm_lsb;
	addr |= LMC_FADR_FBUNK(faddr) << lmc->rank_lsb;
	addr |= LMC_FADR_FROW(faddr) << lmc->row_lsb;
	addr |= (LMC_FADR_FCOL(faddr) >> 4) << lmc->col_hi_lsb;

	/*
	 * NOTE(review): "bank" is shifted by bank_lsb here and the result
	 * is shifted by bank_lsb again below — verify against the hardware
	 * manual whether the first shift is intended.
	 */
	bank = LMC_FADR_FBANK(faddr) << lmc->bank_lsb;

	/* Undo the bank XOR-hashing when LMC_CONTROL.XOR_BANK is set. */
	if (lmc->xor_bank)
		bank ^= get_bits(addr, 12 + lmc->xbits, lmc->bank_width);

	addr |= bank << lmc->bank_lsb;

	xbits = PCI_FUNC(lmc->pdev->devfn);

	/* Undo the L2C index-alias hashing when enabled. */
	if (lmc->l2c_alias)
		xbits ^= get_bits(addr, 20, lmc->xbits) ^
			 get_bits(addr, 12, lmc->xbits);

	addr |= xbits << 7;

	return addr;
}
  418. static unsigned int thunderx_get_num_lmcs(unsigned int node)
  419. {
  420. unsigned int number = 0;
  421. struct pci_dev *pdev = NULL;
  422. do {
  423. pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
  424. PCI_DEVICE_ID_THUNDER_LMC,
  425. pdev);
  426. if (pdev) {
  427. #ifdef CONFIG_NUMA
  428. if (pdev->dev.numa_node == node)
  429. number++;
  430. #else
  431. number++;
  432. #endif
  433. }
  434. } while (pdev);
  435. return number;
  436. }
/* Upper bounds for the error strings built in the threaded ISR. */
#define LMC_MESSAGE_SIZE	120
#define LMC_OTHER_SIZE		(50 * ARRAY_SIZE(lmc_errors))
/*
 * Hard-IRQ half: snapshot the error registers into the next free ring
 * entry, ack the interrupt, and hand off to the threaded handler.
 */
static irqreturn_t thunderx_lmc_err_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct thunderx_lmc *lmc = mci->pvt_info;

	unsigned long head = ring_pos(lmc->ring_head, ARRAY_SIZE(lmc->err_ctx));
	struct lmc_err_ctx *ctx = &lmc->err_ctx[head];

	/* Shut the injection registers off before reading out state. */
	writeq(0, lmc->regs + LMC_CHAR_MASK0);
	writeq(0, lmc->regs + LMC_CHAR_MASK2);
	writeq(0x2, lmc->regs + LMC_ECC_PARITY_TEST);

	/* Capture everything the threaded handler will need to decode. */
	ctx->reg_int = readq(lmc->regs + LMC_INT);
	ctx->reg_fadr = readq(lmc->regs + LMC_FADR);
	ctx->reg_nxm_fadr = readq(lmc->regs + LMC_NXM_FADR);
	ctx->reg_scram_fadr = readq(lmc->regs + LMC_SCRAM_FADR);
	ctx->reg_ecc_synd = readq(lmc->regs + LMC_ECC_SYND);

	lmc->ring_head++;

	/* Tell the injection loop in inject_ecc_write() that we fired. */
	atomic_set(&lmc->ecc_int, 1);

	/* Clear the interrupt */
	writeq(ctx->reg_int, lmc->regs + LMC_INT);

	return IRQ_WAKE_THREAD;
}
/*
 * Threaded half: drain the error ring filled by thunderx_lmc_err_isr(),
 * decode each snapshot and report it to the EDAC core.
 */
static irqreturn_t thunderx_lmc_threaded_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct thunderx_lmc *lmc = mci->pvt_info;
	phys_addr_t phys_addr;
	unsigned long tail;
	struct lmc_err_ctx *ctx;
	irqreturn_t ret = IRQ_NONE;
	char *msg;
	char *other;

	msg = kmalloc(LMC_MESSAGE_SIZE, GFP_KERNEL);
	other = kmalloc(LMC_OTHER_SIZE, GFP_KERNEL);

	if (!msg || !other)
		goto err_free;

	/* Consume every context queued since the last run. */
	while (CIRC_CNT(lmc->ring_head, lmc->ring_tail,
			ARRAY_SIZE(lmc->err_ctx))) {
		tail = ring_pos(lmc->ring_tail, ARRAY_SIZE(lmc->err_ctx));

		ctx = &lmc->err_ctx[tail];

		dev_dbg(&lmc->pdev->dev, "LMC_INT: %016llx\n",
			ctx->reg_int);
		dev_dbg(&lmc->pdev->dev, "LMC_FADR: %016llx\n",
			ctx->reg_fadr);
		dev_dbg(&lmc->pdev->dev, "LMC_NXM_FADR: %016llx\n",
			ctx->reg_nxm_fadr);
		dev_dbg(&lmc->pdev->dev, "LMC_SCRAM_FADR: %016llx\n",
			ctx->reg_scram_fadr);
		dev_dbg(&lmc->pdev->dev, "LMC_ECC_SYND: %016llx\n",
			ctx->reg_ecc_synd);

		/* Location string comes from the scrambled failing address. */
		snprintf(msg, LMC_MESSAGE_SIZE,
			 "DIMM %lld rank %lld bank %lld row %lld col %lld",
			 LMC_FADR_FDIMM(ctx->reg_scram_fadr),
			 LMC_FADR_FBUNK(ctx->reg_scram_fadr),
			 LMC_FADR_FBANK(ctx->reg_scram_fadr),
			 LMC_FADR_FROW(ctx->reg_scram_fadr),
			 LMC_FADR_FCOL(ctx->reg_scram_fadr));

		decode_register(other, LMC_OTHER_SIZE, lmc_errors,
				ctx->reg_int);

		phys_addr = thunderx_faddr_to_phys(ctx->reg_fadr, lmc);

		if (ctx->reg_int & LMC_INT_UE)
			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					     phys_to_pfn(phys_addr),
					     offset_in_page(phys_addr),
					     0, -1, -1, -1, msg, other);
		else if (ctx->reg_int & LMC_INT_CE)
			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
					     phys_to_pfn(phys_addr),
					     offset_in_page(phys_addr),
					     0, -1, -1, -1, msg, other);

		lmc->ring_tail++;
	}

	ret = IRQ_HANDLED;

err_free:
	kfree(msg);
	kfree(other);

	return ret;
}
/* PCI IDs this driver binds to (Cavium ThunderX LMC function). */
static const struct pci_device_id thunderx_lmc_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_LMC) },
	{ 0, },
};
  519. static inline int pci_dev_to_mc_idx(struct pci_dev *pdev)
  520. {
  521. int node = dev_to_node(&pdev->dev);
  522. int ret = PCI_FUNC(pdev->devfn);
  523. ret += max(node, 0) << 3;
  524. return ret;
  525. }
/*
 * Probe one LMC PCI function: map its CSRs, allocate and register an
 * EDAC memory controller, compute the address-decode geometry, wire up
 * the MSI-X error interrupt and (optionally) the debugfs injection
 * interface.
 */
static int thunderx_lmc_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct thunderx_lmc *lmc;
	struct edac_mc_layer layer;
	struct mem_ctl_info *mci;
	u64 lmc_control, lmc_ddr_pll_ctl, lmc_config;
	int ret;
	u64 lmc_int;
	/* NOTE(review): passed to readq()/iounmap(); arguably should be
	 * void __iomem * — confirm with sparse. */
	void *l2c_ioaddr;

	/* Single slot layer, two DIMM slots per controller. */
	layer.type = EDAC_MC_LAYER_SLOT;
	layer.size = 2;
	layer.is_virt_csrow = false;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_lmc");
	if (ret) {
		dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
		return ret;
	}

	mci = edac_mc_alloc(pci_dev_to_mc_idx(pdev), 1, &layer,
			    sizeof(struct thunderx_lmc));
	if (!mci)
		return -ENOMEM;

	mci->pdev = &pdev->dev;
	lmc = mci->pvt_info;

	pci_set_drvdata(pdev, mci);

	lmc->regs = pcim_iomap_table(pdev)[0];

	lmc_control = readq(lmc->regs + LMC_CONTROL);
	lmc_ddr_pll_ctl = readq(lmc->regs + LMC_DDR_PLL_CTL);
	lmc_config = readq(lmc->regs + LMC_CONFIG);

	/* Registered vs. unbuffered DIMMs, DDR4 vs. DDR3. */
	if (lmc_control & LMC_CONTROL_RDIMM) {
		mci->mtype_cap = FIELD_GET(LMC_DDR_PLL_CTL_DDR4,
					   lmc_ddr_pll_ctl) ?
				MEM_RDDR4 : MEM_RDDR3;
	} else {
		mci->mtype_cap = FIELD_GET(LMC_DDR_PLL_CTL_DDR4,
					   lmc_ddr_pll_ctl) ?
				MEM_DDR4 : MEM_DDR3;
	}

	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;

	mci->mod_name = "thunderx-lmc";
	mci->ctl_name = "thunderx-lmc";
	mci->dev_name = dev_name(&pdev->dev);
	mci->scrub_mode = SCRUB_NONE;

	lmc->pdev = pdev;
	lmc->msix_ent.entry = 0;

	lmc->ring_head = 0;
	lmc->ring_tail = 0;

	ret = pci_enable_msix_exact(pdev, &lmc->msix_ent, 1);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
		goto err_free;
	}

	ret = devm_request_threaded_irq(&pdev->dev, lmc->msix_ent.vector,
					thunderx_lmc_err_isr,
					thunderx_lmc_threaded_isr, 0,
					"[EDAC] ThunderX LMC", mci);
	if (ret) {
		dev_err(&pdev->dev, "Cannot set ISR: %d\n", ret);
		goto err_free;
	}

	/* Node id is carried in bits [45:44] of the BAR address. */
	lmc->node = FIELD_GET(THUNDERX_NODE, pci_resource_start(pdev, 0));

	/* Derive the physical-address decode geometry from the config. */
	lmc->xbits = thunderx_get_num_lmcs(lmc->node) >> 1;
	lmc->bank_width = (FIELD_GET(LMC_DDR_PLL_CTL_DDR4, lmc_ddr_pll_ctl) &&
			   FIELD_GET(LMC_CONFIG_BG2, lmc_config)) ? 4 : 3;

	lmc->pbank_lsb = (lmc_config >> 5) & 0xf;
	lmc->dimm_lsb = 28 + lmc->pbank_lsb + lmc->xbits;
	lmc->rank_lsb = lmc->dimm_lsb;
	lmc->rank_lsb -= FIELD_GET(LMC_CONFIG_RANK_ENA, lmc_config) ? 1 : 0;
	lmc->bank_lsb = 7 + lmc->xbits;
	lmc->row_lsb = 14 + LMC_CONFIG_ROW_LSB(lmc_config) + lmc->xbits;

	lmc->col_hi_lsb = lmc->bank_lsb + lmc->bank_width;

	lmc->xor_bank = lmc_control & LMC_CONTROL_XOR_BANK;

	/* Peek at the node's L2C to learn whether index aliasing is on. */
	l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node), PAGE_SIZE);
	if (!l2c_ioaddr) {
		dev_err(&pdev->dev, "Cannot map L2C_CTL\n");
		ret = -ENOMEM;
		goto err_free;
	}

	lmc->l2c_alias = !(readq(l2c_ioaddr) & L2C_CTL_DISIDXALIAS);

	iounmap(l2c_ioaddr);

	ret = edac_mc_add_mc(mci);
	if (ret) {
		dev_err(&pdev->dev, "Cannot add the MC: %d\n", ret);
		goto err_free;
	}

	/* Ack anything pending, then enable all LMC interrupt sources. */
	lmc_int = readq(lmc->regs + LMC_INT);
	writeq(lmc_int, lmc->regs + LMC_INT);

	writeq(LMC_INT_ENA_ALL, lmc->regs + LMC_INT_ENA_W1S);

	/* debugfs failure is non-fatal: warn and carry on. */
	if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
		ret = thunderx_create_debugfs_nodes(mci->debugfs,
						    lmc_dfs_ents,
						    lmc,
						    ARRAY_SIZE(lmc_dfs_ents));

		if (ret != ARRAY_SIZE(lmc_dfs_ents)) {
			dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
				 ret, ret >= 0 ? " created" : "");
		}
	}

	return 0;

err_free:
	pci_set_drvdata(pdev, NULL);
	edac_mc_free(mci);

	return ret;
}
/* Unbind: mask all LMC interrupts, then tear down the EDAC controller. */
static void thunderx_lmc_remove(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci = pci_get_drvdata(pdev);
	struct thunderx_lmc *lmc = mci->pvt_info;

	/* Disable every interrupt source via the W1C enable register. */
	writeq(LMC_INT_ENA_ALL, lmc->regs + LMC_INT_ENA_W1C);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
}
MODULE_DEVICE_TABLE(pci, thunderx_lmc_pci_tbl);

/* PCI driver glue for the LMC EDAC device. */
static struct pci_driver thunderx_lmc_driver = {
	.name     = "thunderx_lmc_edac",
	.probe    = thunderx_lmc_probe,
	.remove   = thunderx_lmc_remove,
	.id_table = thunderx_lmc_pci_tbl,
};
/*---------------------- OCX driver ---------------------------------*/

#define PCI_DEVICE_ID_THUNDER_OCX 0xa013

#define OCX_LINK_INTS		3
#define OCX_INTS		(OCX_LINK_INTS + 1)
#define OCX_RX_LANES		24
#define OCX_RX_LANE_STATS	15

/* OCX common interrupt registers and cause bits. */
#define OCX_COM_INT		0x100
#define OCX_COM_INT_W1S		0x108
#define OCX_COM_INT_ENA_W1S	0x110
#define OCX_COM_INT_ENA_W1C	0x118

#define OCX_COM_IO_BADID	BIT(54)
#define OCX_COM_MEM_BADID	BIT(53)
#define OCX_COM_COPR_BADID	BIT(52)
#define OCX_COM_WIN_REQ_BADID	BIT(51)
#define OCX_COM_WIN_REQ_TOUT	BIT(50)
#define OCX_COM_RX_LANE		GENMASK(23, 0)

/* All common causes listed above are treated as correctable. */
#define OCX_COM_INT_CE		(OCX_COM_IO_BADID | \
				 OCX_COM_MEM_BADID | \
				 OCX_COM_COPR_BADID | \
				 OCX_COM_WIN_REQ_BADID | \
				 OCX_COM_WIN_REQ_TOUT)
/* Decode table for OCX_COM_INT cause bits; all-zero entry terminates. */
static const struct error_descr ocx_com_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_IO_BADID,
		.descr = "Invalid IO transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_MEM_BADID,
		.descr = "Invalid memory transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_COPR_BADID,
		.descr = "Invalid coprocessor transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_WIN_REQ_BADID,
		.descr = "Invalid SLI transaction node ID",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_WIN_REQ_TOUT,
		.descr = "Window/core request timeout",
	},
	{0, 0, NULL},
};
/* Per-link interrupt registers (x = link index, 0..OCX_LINK_INTS-1). */
#define OCX_COM_LINKX_INT(x)		(0x120 + (x) * 8)
#define OCX_COM_LINKX_INT_W1S(x)	(0x140 + (x) * 8)
#define OCX_COM_LINKX_INT_ENA_W1S(x)	(0x160 + (x) * 8)
#define OCX_COM_LINKX_INT_ENA_W1C(x)	(0x180 + (x) * 8)

/* Per-link interrupt cause bits. */
#define OCX_COM_LINK_BAD_WORD		BIT(13)
#define OCX_COM_LINK_ALIGN_FAIL		BIT(12)
#define OCX_COM_LINK_ALIGN_DONE		BIT(11)
#define OCX_COM_LINK_UP			BIT(10)
#define OCX_COM_LINK_STOP		BIT(9)
#define OCX_COM_LINK_BLK_ERR		BIT(8)
#define OCX_COM_LINK_REINIT		BIT(7)
#define OCX_COM_LINK_LNK_DATA		BIT(6)
#define OCX_COM_LINK_RXFIFO_DBE		BIT(5)
#define OCX_COM_LINK_RXFIFO_SBE		BIT(4)
#define OCX_COM_LINK_TXFIFO_DBE		BIT(3)
#define OCX_COM_LINK_TXFIFO_SBE		BIT(2)
#define OCX_COM_LINK_REPLAY_DBE		BIT(1)
#define OCX_COM_LINK_REPLAY_SBE		BIT(0)
/*
 * Decode table for OCX_COM_LINK(x)_INT bits: single-bit (SBE) conditions are
 * corrected, double-bit (DBE) and link-stop conditions are uncorrected.
 */
static const struct error_descr ocx_com_link_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_REPLAY_SBE,
		.descr = "Replay buffer single-bit error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_TXFIFO_SBE,
		.descr = "TX FIFO single-bit error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_RXFIFO_SBE,
		.descr = "RX FIFO single-bit error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_BLK_ERR,
		.descr = "Block code error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_ALIGN_FAIL,
		.descr = "Link alignment failure",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_COM_LINK_BAD_WORD,
		.descr = "Bad code word",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = OCX_COM_LINK_REPLAY_DBE,
		.descr = "Replay buffer double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = OCX_COM_LINK_TXFIFO_DBE,
		.descr = "TX FIFO double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = OCX_COM_LINK_RXFIFO_DBE,
		.descr = "RX FIFO double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = OCX_COM_LINK_STOP,
		.descr = "Link stopped",
	},
	{0, 0, NULL},
};
/* Uncorrected link error summary mask. */
#define OCX_COM_LINK_INT_UE	(OCX_COM_LINK_REPLAY_DBE | \
				 OCX_COM_LINK_TXFIFO_DBE | \
				 OCX_COM_LINK_RXFIFO_DBE | \
				 OCX_COM_LINK_STOP)

/* Corrected link error summary mask. */
#define OCX_COM_LINK_INT_CE	(OCX_COM_LINK_REPLAY_SBE | \
				 OCX_COM_LINK_TXFIFO_SBE | \
				 OCX_COM_LINK_RXFIFO_SBE | \
				 OCX_COM_LINK_BLK_ERR    | \
				 OCX_COM_LINK_ALIGN_FAIL | \
				 OCX_COM_LINK_BAD_WORD)

/* Per-lane (x = lane 0..23) CSRs; OCX_LNE_STAT(x, y) selects statistic y. */
#define OCX_LNE_INT(x)			(0x8018 + (x) * 0x100)
#define OCX_LNE_INT_EN(x)		(0x8020 + (x) * 0x100)
#define OCX_LNE_BAD_CNT(x)		(0x8028 + (x) * 0x100)
#define OCX_LNE_CFG(x)			(0x8000 + (x) * 0x100)
#define OCX_LNE_STAT(x, y)		(0x8040 + (x) * 0x100 + (y) * 8)

/* OCX_LNE_CFG(x) bit assignments. */
#define OCX_LNE_CFG_RX_BDRY_LOCK_DIS	BIT(8)
#define OCX_LNE_CFG_RX_STAT_WRAP_DIS	BIT(2)
#define OCX_LNE_CFG_RX_STAT_RDCLR	BIT(1)
#define OCX_LNE_CFG_RX_STAT_ENA		BIT(0)

/* OCX_LNE_INT(x) bit assignments. */
#define OCX_LANE_BAD_64B67B		BIT(8)
#define OCX_LANE_DSKEW_FIFO_OVFL	BIT(5)
#define OCX_LANE_SCRM_SYNC_LOSS		BIT(4)
#define OCX_LANE_UKWN_CNTL_WORD		BIT(3)
#define OCX_LANE_CRC32_ERR		BIT(2)
#define OCX_LANE_BDRY_SYNC_LOSS		BIT(1)
#define OCX_LANE_SERDES_LOCK_LOSS	BIT(0)

/* Lane-level conditions: none are uncorrected, all are corrected. */
#define OCX_COM_LANE_INT_UE	(0)
#define OCX_COM_LANE_INT_CE	(OCX_LANE_SERDES_LOCK_LOSS | \
				 OCX_LANE_BDRY_SYNC_LOSS   | \
				 OCX_LANE_CRC32_ERR        | \
				 OCX_LANE_UKWN_CNTL_WORD   | \
				 OCX_LANE_SCRM_SYNC_LOSS   | \
				 OCX_LANE_DSKEW_FIFO_OVFL  | \
				 OCX_LANE_BAD_64B67B)
/* Decode table for OCX_LNE_INT(x) bits; all lane errors are corrected. */
static const struct error_descr ocx_lane_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_SERDES_LOCK_LOSS,
		.descr = "RX SerDes lock lost",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_BDRY_SYNC_LOSS,
		.descr = "RX word boundary lost",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_CRC32_ERR,
		.descr = "CRC32 error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_UKWN_CNTL_WORD,
		.descr = "Unknown control word",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_SCRM_SYNC_LOSS,
		.descr = "Scrambler synchronization lost",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_DSKEW_FIFO_OVFL,
		.descr = "RX deskew FIFO overflow",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = OCX_LANE_BAD_64B67B,
		.descr = "Bad 64B/67B codeword",
	},
	{0, 0, NULL},
};
/* "Enable everything" masks used at probe/remove time. */
#define OCX_LNE_INT_ENA_ALL		(GENMASK(9, 8) | GENMASK(6, 0))
#define OCX_COM_INT_ENA_ALL		(GENMASK(54, 50) | GENMASK(23, 0))
#define OCX_COM_LINKX_INT_ENA_ALL	(GENMASK(13, 12) | \
					 GENMASK(9, 7) | GENMASK(5, 0))

/* TX/RX link ECC control CSRs, exposed via debugfs for error injection. */
#define OCX_TLKX_ECC_CTL(x)		(0x10018 + (x) * 0x2000)
#define OCX_RLKX_ECC_CTL(x)		(0x18018 + (x) * 0x2000)
/* Snapshot of one COM interrupt: status plus per-lane state at IRQ time. */
struct ocx_com_err_ctx {
	u64 reg_com_int;			/* OCX_COM_INT value */
	u64 reg_lane_int[OCX_RX_LANES];		/* OCX_LNE_INT per lane */
	u64 reg_lane_stat11[OCX_RX_LANES];	/* OCX_LNE_STAT(lane, 11) */
};
/* Snapshot of one link interrupt: status plus the link index it came from. */
struct ocx_link_err_ctx {
	u64 reg_com_link_int;	/* OCX_COM_LINKX_INT(link) value */
	int link;		/* link index (== MSI-X entry) */
};
/*
 * Per-device OCX state.  Hard IRQ handlers push error snapshots at the ring
 * head; the threaded handlers drain from the tail (lock-free SPSC rings).
 */
struct thunderx_ocx {
	void __iomem *regs;
	int com_link;
	struct pci_dev *pdev;
	struct edac_device_ctl_info *edac_dev;

	struct dentry *debugfs;
	struct msix_entry msix_ent[OCX_INTS];

	struct ocx_com_err_ctx com_err_ctx[RING_ENTRIES];
	struct ocx_link_err_ctx link_err_ctx[RING_ENTRIES];

	unsigned long com_ring_head;
	unsigned long com_ring_tail;

	unsigned long link_ring_head;
	unsigned long link_ring_tail;
};
/* Scratch buffer sizes used when formatting EDAC messages. */
#define OCX_MESSAGE_SIZE	SZ_1K
#define OCX_OTHER_SIZE		(50 * ARRAY_SIZE(ocx_com_link_errors))
  874. /* This handler is threaded */
  875. static irqreturn_t thunderx_ocx_com_isr(int irq, void *irq_id)
  876. {
  877. struct msix_entry *msix = irq_id;
  878. struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
  879. msix_ent[msix->entry]);
  880. int lane;
  881. unsigned long head = ring_pos(ocx->com_ring_head,
  882. ARRAY_SIZE(ocx->com_err_ctx));
  883. struct ocx_com_err_ctx *ctx = &ocx->com_err_ctx[head];
  884. ctx->reg_com_int = readq(ocx->regs + OCX_COM_INT);
  885. for (lane = 0; lane < OCX_RX_LANES; lane++) {
  886. ctx->reg_lane_int[lane] =
  887. readq(ocx->regs + OCX_LNE_INT(lane));
  888. ctx->reg_lane_stat11[lane] =
  889. readq(ocx->regs + OCX_LNE_STAT(lane, 11));
  890. writeq(ctx->reg_lane_int[lane], ocx->regs + OCX_LNE_INT(lane));
  891. }
  892. writeq(ctx->reg_com_int, ocx->regs + OCX_COM_INT);
  893. ocx->com_ring_head++;
  894. return IRQ_WAKE_THREAD;
  895. }
  896. static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
  897. {
  898. struct msix_entry *msix = irq_id;
  899. struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
  900. msix_ent[msix->entry]);
  901. irqreturn_t ret = IRQ_NONE;
  902. unsigned long tail;
  903. struct ocx_com_err_ctx *ctx;
  904. int lane;
  905. char *msg;
  906. char *other;
  907. msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
  908. other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
  909. if (!msg || !other)
  910. goto err_free;
  911. while (CIRC_CNT(ocx->com_ring_head, ocx->com_ring_tail,
  912. ARRAY_SIZE(ocx->com_err_ctx))) {
  913. tail = ring_pos(ocx->com_ring_tail,
  914. ARRAY_SIZE(ocx->com_err_ctx));
  915. ctx = &ocx->com_err_ctx[tail];
  916. snprintf(msg, OCX_MESSAGE_SIZE, "%s: OCX_COM_INT: %016llx",
  917. ocx->edac_dev->ctl_name, ctx->reg_com_int);
  918. decode_register(other, OCX_OTHER_SIZE,
  919. ocx_com_errors, ctx->reg_com_int);
  920. strncat(msg, other, OCX_MESSAGE_SIZE);
  921. for (lane = 0; lane < OCX_RX_LANES; lane++)
  922. if (ctx->reg_com_int & BIT(lane)) {
  923. snprintf(other, OCX_OTHER_SIZE,
  924. "\n\tOCX_LNE_INT[%02d]: %016llx OCX_LNE_STAT11[%02d]: %016llx",
  925. lane, ctx->reg_lane_int[lane],
  926. lane, ctx->reg_lane_stat11[lane]);
  927. strncat(msg, other, OCX_MESSAGE_SIZE);
  928. decode_register(other, OCX_OTHER_SIZE,
  929. ocx_lane_errors,
  930. ctx->reg_lane_int[lane]);
  931. strncat(msg, other, OCX_MESSAGE_SIZE);
  932. }
  933. if (ctx->reg_com_int & OCX_COM_INT_CE)
  934. edac_device_handle_ce(ocx->edac_dev, 0, 0, msg);
  935. ocx->com_ring_tail++;
  936. }
  937. ret = IRQ_HANDLED;
  938. err_free:
  939. kfree(other);
  940. kfree(msg);
  941. return ret;
  942. }
  943. static irqreturn_t thunderx_ocx_lnk_isr(int irq, void *irq_id)
  944. {
  945. struct msix_entry *msix = irq_id;
  946. struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
  947. msix_ent[msix->entry]);
  948. unsigned long head = ring_pos(ocx->link_ring_head,
  949. ARRAY_SIZE(ocx->link_err_ctx));
  950. struct ocx_link_err_ctx *ctx = &ocx->link_err_ctx[head];
  951. ctx->link = msix->entry;
  952. ctx->reg_com_link_int = readq(ocx->regs + OCX_COM_LINKX_INT(ctx->link));
  953. writeq(ctx->reg_com_link_int, ocx->regs + OCX_COM_LINKX_INT(ctx->link));
  954. ocx->link_ring_head++;
  955. return IRQ_WAKE_THREAD;
  956. }
  957. static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
  958. {
  959. struct msix_entry *msix = irq_id;
  960. struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
  961. msix_ent[msix->entry]);
  962. irqreturn_t ret = IRQ_NONE;
  963. unsigned long tail;
  964. struct ocx_link_err_ctx *ctx;
  965. char *msg;
  966. char *other;
  967. msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
  968. other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
  969. if (!msg || !other)
  970. goto err_free;
  971. while (CIRC_CNT(ocx->link_ring_head, ocx->link_ring_tail,
  972. ARRAY_SIZE(ocx->link_err_ctx))) {
  973. tail = ring_pos(ocx->link_ring_head,
  974. ARRAY_SIZE(ocx->link_err_ctx));
  975. ctx = &ocx->link_err_ctx[tail];
  976. snprintf(msg, OCX_MESSAGE_SIZE,
  977. "%s: OCX_COM_LINK_INT[%d]: %016llx",
  978. ocx->edac_dev->ctl_name,
  979. ctx->link, ctx->reg_com_link_int);
  980. decode_register(other, OCX_OTHER_SIZE,
  981. ocx_com_link_errors, ctx->reg_com_link_int);
  982. strncat(msg, other, OCX_MESSAGE_SIZE);
  983. if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
  984. edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
  985. else if (ctx->reg_com_link_int & OCX_COM_LINK_INT_CE)
  986. edac_device_handle_ce(ocx->edac_dev, 0, 0, msg);
  987. ocx->link_ring_tail++;
  988. }
  989. ret = IRQ_HANDLED;
  990. err_free:
  991. kfree(other);
  992. kfree(msg);
  993. return ret;
  994. }
/* Raw register access nodes exposed under debugfs when EDAC_DEBUG is set;
 * the W1S variants allow injecting interrupts for testing. */
#define OCX_DEBUGFS_ATTR(_name, _reg)	DEBUGFS_REG_ATTR(ocx, _name, _reg)

OCX_DEBUGFS_ATTR(tlk0_ecc_ctl, OCX_TLKX_ECC_CTL(0));
OCX_DEBUGFS_ATTR(tlk1_ecc_ctl, OCX_TLKX_ECC_CTL(1));
OCX_DEBUGFS_ATTR(tlk2_ecc_ctl, OCX_TLKX_ECC_CTL(2));

OCX_DEBUGFS_ATTR(rlk0_ecc_ctl, OCX_RLKX_ECC_CTL(0));
OCX_DEBUGFS_ATTR(rlk1_ecc_ctl, OCX_RLKX_ECC_CTL(1));
OCX_DEBUGFS_ATTR(rlk2_ecc_ctl, OCX_RLKX_ECC_CTL(2));

OCX_DEBUGFS_ATTR(com_link0_int, OCX_COM_LINKX_INT_W1S(0));
OCX_DEBUGFS_ATTR(com_link1_int, OCX_COM_LINKX_INT_W1S(1));
OCX_DEBUGFS_ATTR(com_link2_int, OCX_COM_LINKX_INT_W1S(2));

OCX_DEBUGFS_ATTR(lne00_badcnt, OCX_LNE_BAD_CNT(0));
OCX_DEBUGFS_ATTR(lne01_badcnt, OCX_LNE_BAD_CNT(1));
OCX_DEBUGFS_ATTR(lne02_badcnt, OCX_LNE_BAD_CNT(2));
OCX_DEBUGFS_ATTR(lne03_badcnt, OCX_LNE_BAD_CNT(3));
OCX_DEBUGFS_ATTR(lne04_badcnt, OCX_LNE_BAD_CNT(4));
OCX_DEBUGFS_ATTR(lne05_badcnt, OCX_LNE_BAD_CNT(5));
OCX_DEBUGFS_ATTR(lne06_badcnt, OCX_LNE_BAD_CNT(6));
OCX_DEBUGFS_ATTR(lne07_badcnt, OCX_LNE_BAD_CNT(7));
OCX_DEBUGFS_ATTR(lne08_badcnt, OCX_LNE_BAD_CNT(8));
OCX_DEBUGFS_ATTR(lne09_badcnt, OCX_LNE_BAD_CNT(9));
OCX_DEBUGFS_ATTR(lne10_badcnt, OCX_LNE_BAD_CNT(10));
OCX_DEBUGFS_ATTR(lne11_badcnt, OCX_LNE_BAD_CNT(11));
OCX_DEBUGFS_ATTR(lne12_badcnt, OCX_LNE_BAD_CNT(12));
OCX_DEBUGFS_ATTR(lne13_badcnt, OCX_LNE_BAD_CNT(13));
OCX_DEBUGFS_ATTR(lne14_badcnt, OCX_LNE_BAD_CNT(14));
OCX_DEBUGFS_ATTR(lne15_badcnt, OCX_LNE_BAD_CNT(15));
OCX_DEBUGFS_ATTR(lne16_badcnt, OCX_LNE_BAD_CNT(16));
OCX_DEBUGFS_ATTR(lne17_badcnt, OCX_LNE_BAD_CNT(17));
OCX_DEBUGFS_ATTR(lne18_badcnt, OCX_LNE_BAD_CNT(18));
OCX_DEBUGFS_ATTR(lne19_badcnt, OCX_LNE_BAD_CNT(19));
OCX_DEBUGFS_ATTR(lne20_badcnt, OCX_LNE_BAD_CNT(20));
OCX_DEBUGFS_ATTR(lne21_badcnt, OCX_LNE_BAD_CNT(21));
OCX_DEBUGFS_ATTR(lne22_badcnt, OCX_LNE_BAD_CNT(22));
OCX_DEBUGFS_ATTR(lne23_badcnt, OCX_LNE_BAD_CNT(23));

OCX_DEBUGFS_ATTR(com_int, OCX_COM_INT_W1S);
/* All OCX debugfs nodes, registered in one go at probe time. */
struct debugfs_entry *ocx_dfs_ents[] = {
	&debugfs_tlk0_ecc_ctl,
	&debugfs_tlk1_ecc_ctl,
	&debugfs_tlk2_ecc_ctl,
	&debugfs_rlk0_ecc_ctl,
	&debugfs_rlk1_ecc_ctl,
	&debugfs_rlk2_ecc_ctl,
	&debugfs_com_link0_int,
	&debugfs_com_link1_int,
	&debugfs_com_link2_int,
	&debugfs_lne00_badcnt,
	&debugfs_lne01_badcnt,
	&debugfs_lne02_badcnt,
	&debugfs_lne03_badcnt,
	&debugfs_lne04_badcnt,
	&debugfs_lne05_badcnt,
	&debugfs_lne06_badcnt,
	&debugfs_lne07_badcnt,
	&debugfs_lne08_badcnt,
	&debugfs_lne09_badcnt,
	&debugfs_lne10_badcnt,
	&debugfs_lne11_badcnt,
	&debugfs_lne12_badcnt,
	&debugfs_lne13_badcnt,
	&debugfs_lne14_badcnt,
	&debugfs_lne15_badcnt,
	&debugfs_lne16_badcnt,
	&debugfs_lne17_badcnt,
	&debugfs_lne18_badcnt,
	&debugfs_lne19_badcnt,
	&debugfs_lne20_badcnt,
	&debugfs_lne21_badcnt,
	&debugfs_lne22_badcnt,
	&debugfs_lne23_badcnt,
	&debugfs_com_int,
};
/* PCI IDs this driver binds to (Cavium ThunderX OCX block). */
static const struct pci_device_id thunderx_ocx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_OCX) },
	{ 0, },
};
  1070. static void thunderx_ocx_clearstats(struct thunderx_ocx *ocx)
  1071. {
  1072. int lane, stat, cfg;
  1073. for (lane = 0; lane < OCX_RX_LANES; lane++) {
  1074. cfg = readq(ocx->regs + OCX_LNE_CFG(lane));
  1075. cfg |= OCX_LNE_CFG_RX_STAT_RDCLR;
  1076. cfg &= ~OCX_LNE_CFG_RX_STAT_ENA;
  1077. writeq(cfg, ocx->regs + OCX_LNE_CFG(lane));
  1078. for (stat = 0; stat < OCX_RX_LANE_STATS; stat++)
  1079. readq(ocx->regs + OCX_LNE_STAT(lane, stat));
  1080. }
  1081. }
/*
 * Probe one OCX device: map BAR0, allocate the EDAC control structure,
 * request the three link vectors plus the COM vector, register with the
 * EDAC core, then clear stale state and enable all interrupt sources.
 */
static int thunderx_ocx_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct thunderx_ocx *ocx;
	struct edac_device_ctl_info *edac_dev;
	char name[32];
	int idx;
	int i;
	int ret;
	u64 reg;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_ocx");
	if (ret) {
		dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
		return ret;
	}

	idx = edac_device_alloc_index();
	snprintf(name, sizeof(name), "OCX%d", idx);
	/* Private data (struct thunderx_ocx) is allocated inside edac_dev. */
	edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_ocx),
					      name, 1, "CCPI", 1,
					      0, NULL, 0, idx);
	if (!edac_dev) {
		/* NOTE(review): "ret" is 0 (stale) here; the %d is
		 * meaningless in this message. */
		dev_err(&pdev->dev, "Cannot allocate EDAC device: %d\n", ret);
		return -ENOMEM;
	}
	ocx = edac_dev->pvt_info;
	ocx->edac_dev = edac_dev;
	/* Empty rings: head == tail. */
	ocx->com_ring_head = 0;
	ocx->com_ring_tail = 0;
	ocx->link_ring_head = 0;
	ocx->link_ring_tail = 0;

	ocx->regs = pcim_iomap_table(pdev)[0];
	if (!ocx->regs) {
		/* NOTE(review): "ret" is still 0 when this prints. */
		dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
		ret = -ENODEV;
		goto err_free;
	}

	ocx->pdev = pdev;

	for (i = 0; i < OCX_INTS; i++) {
		ocx->msix_ent[i].entry = i;
		ocx->msix_ent[i].vector = 0;
	}

	ret = pci_enable_msix_exact(pdev, ocx->msix_ent, OCX_INTS);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
		goto err_free;
	}

	/* Entries 0..2 are the per-link vectors, entry 3 is the COM vector;
	 * the msix_entry itself is passed as the IRQ cookie so the handlers
	 * can recover both the device and the entry index. */
	for (i = 0; i < OCX_INTS; i++) {
		ret = devm_request_threaded_irq(&pdev->dev,
						ocx->msix_ent[i].vector,
						(i == 3) ?
						 thunderx_ocx_com_isr :
						 thunderx_ocx_lnk_isr,
						(i == 3) ?
						 thunderx_ocx_com_threaded_isr :
						 thunderx_ocx_lnk_threaded_isr,
						0, "[EDAC] ThunderX OCX",
						&ocx->msix_ent[i]);
		if (ret)
			goto err_free;
	}

	edac_dev->dev = &pdev->dev;
	edac_dev->dev_name = dev_name(&pdev->dev);
	edac_dev->mod_name = "thunderx-ocx";
	edac_dev->ctl_name = "thunderx-ocx";

	ret = edac_device_add_device(edac_dev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot add EDAC device: %d\n", ret);
		goto err_free;
	}

	if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
		ocx->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);

		ret = thunderx_create_debugfs_nodes(ocx->debugfs,
						    ocx_dfs_ents,
						    ocx,
						    ARRAY_SIZE(ocx_dfs_ents));
		/* Debugfs failures are only warned about, not fatal. */
		if (ret != ARRAY_SIZE(ocx_dfs_ents)) {
			dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
				 ret, ret >= 0 ? " created" : "");
		}
	}

	pci_set_drvdata(pdev, edac_dev);

	thunderx_ocx_clearstats(ocx);

	/* Per lane: enable interrupt sources, then read/write-back (W1C)
	 * the status to discard anything latched before we got here. */
	for (i = 0; i < OCX_RX_LANES; i++) {
		writeq(OCX_LNE_INT_ENA_ALL,
		       ocx->regs + OCX_LNE_INT_EN(i));
		reg = readq(ocx->regs + OCX_LNE_INT(i));
		writeq(reg, ocx->regs + OCX_LNE_INT(i));
	}

	/* Per link: clear stale status, then enable all sources. */
	for (i = 0; i < OCX_LINK_INTS; i++) {
		reg = readq(ocx->regs + OCX_COM_LINKX_INT(i));
		writeq(reg, ocx->regs + OCX_COM_LINKX_INT(i));

		writeq(OCX_COM_LINKX_INT_ENA_ALL,
		       ocx->regs + OCX_COM_LINKX_INT_ENA_W1S(i));
	}

	/* Finally the common block: clear stale status, enable everything. */
	reg = readq(ocx->regs + OCX_COM_INT);
	writeq(reg, ocx->regs + OCX_COM_INT);

	writeq(OCX_COM_INT_ENA_ALL, ocx->regs + OCX_COM_INT_ENA_W1S);

	return 0;
err_free:
	edac_device_free_ctl_info(edac_dev);

	return ret;
}
  1189. static void thunderx_ocx_remove(struct pci_dev *pdev)
  1190. {
  1191. struct edac_device_ctl_info *edac_dev = pci_get_drvdata(pdev);
  1192. struct thunderx_ocx *ocx = edac_dev->pvt_info;
  1193. int i;
  1194. writeq(OCX_COM_INT_ENA_ALL, ocx->regs + OCX_COM_INT_ENA_W1C);
  1195. for (i = 0; i < OCX_INTS; i++) {
  1196. writeq(OCX_COM_LINKX_INT_ENA_ALL,
  1197. ocx->regs + OCX_COM_LINKX_INT_ENA_W1C(i));
  1198. }
  1199. edac_debugfs_remove_recursive(ocx->debugfs);
  1200. edac_device_del_device(&pdev->dev);
  1201. edac_device_free_ctl_info(edac_dev);
  1202. }
MODULE_DEVICE_TABLE(pci, thunderx_ocx_pci_tbl);

/* PCI driver glue for the OCX EDAC block. */
static struct pci_driver thunderx_ocx_driver = {
	.name     = "thunderx_ocx_edac",
	.probe    = thunderx_ocx_probe,
	.remove   = thunderx_ocx_remove,
	.id_table = thunderx_ocx_pci_tbl,
};
/*---------------------- L2C driver ---------------------------------*/

/* The L2C EDAC support covers three sub-blocks: TAD, CBC and MCI. */
#define PCI_DEVICE_ID_THUNDER_L2C_TAD 0xa02e
#define PCI_DEVICE_ID_THUNDER_L2C_CBC 0xa02f
#define PCI_DEVICE_ID_THUNDER_L2C_MCI 0xa030

/* L2C TAD interrupt CSRs: W1C status, W1S, enable set/clear. */
#define L2C_TAD_INT_W1C		0x40000
#define L2C_TAD_INT_W1S		0x40008

#define L2C_TAD_INT_ENA_W1C	0x40020
#define L2C_TAD_INT_ENA_W1S	0x40028

/* L2C_TAD_INT bit assignments. */
#define L2C_TAD_INT_L2DDBE	 BIT(1)
#define L2C_TAD_INT_SBFSBE	 BIT(2)
#define L2C_TAD_INT_SBFDBE	 BIT(3)
#define L2C_TAD_INT_FBFSBE	 BIT(4)
#define L2C_TAD_INT_FBFDBE	 BIT(5)
#define L2C_TAD_INT_TAGDBE	 BIT(9)
#define L2C_TAD_INT_RDDISLMC	 BIT(15)
#define L2C_TAD_INT_WRDISLMC	 BIT(16)
#define L2C_TAD_INT_LFBTO	 BIT(17)
#define L2C_TAD_INT_GSYNCTO	 BIT(18)
#define L2C_TAD_INT_RTGSBE	 BIT(32)
#define L2C_TAD_INT_RTGDBE	 BIT(33)
#define L2C_TAD_INT_RDDISOCI	 BIT(34)
#define L2C_TAD_INT_WRDISOCI	 BIT(35)

/* ECC-related sources: selects which extended register the ISR latches. */
#define L2C_TAD_INT_ECC		(L2C_TAD_INT_L2DDBE | \
				 L2C_TAD_INT_SBFSBE | L2C_TAD_INT_SBFDBE | \
				 L2C_TAD_INT_FBFSBE | L2C_TAD_INT_FBFDBE)

/* Corrected (single-bit) error summary mask. */
#define L2C_TAD_INT_CE          (L2C_TAD_INT_SBFSBE | \
				 L2C_TAD_INT_FBFSBE)

/* Uncorrected error summary mask (double-bit, disabled-target, timeouts). */
#define L2C_TAD_INT_UE          (L2C_TAD_INT_L2DDBE | \
				 L2C_TAD_INT_SBFDBE | \
				 L2C_TAD_INT_FBFDBE | \
				 L2C_TAD_INT_TAGDBE | \
				 L2C_TAD_INT_RTGDBE | \
				 L2C_TAD_INT_WRDISOCI | \
				 L2C_TAD_INT_RDDISOCI | \
				 L2C_TAD_INT_WRDISLMC | \
				 L2C_TAD_INT_RDDISLMC | \
				 L2C_TAD_INT_LFBTO    | \
				 L2C_TAD_INT_GSYNCTO)
/* Decode table for L2C_TAD_INT bits; terminated by an all-zero sentinel. */
static const struct error_descr l2_tad_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = L2C_TAD_INT_SBFSBE,
		.descr = "SBF single-bit error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = L2C_TAD_INT_FBFSBE,
		.descr = "FBF single-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_L2DDBE,
		.descr = "L2D double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_SBFDBE,
		.descr = "SBF double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_FBFDBE,
		.descr = "FBF double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_TAGDBE,
		.descr = "TAG double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_RTGDBE,
		.descr = "RTG double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_WRDISOCI,
		.descr = "Write to a disabled CCPI",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_RDDISOCI,
		.descr = "Read from a disabled CCPI",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_WRDISLMC,
		.descr = "Write to a disabled LMC",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_RDDISLMC,
		.descr = "Read from a disabled LMC",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_LFBTO,
		.descr = "LFB entry timeout",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_TAD_INT_GSYNCTO,
		.descr = "Global sync CCPI timeout",
	},
	{0, 0, NULL},
};
/* Grouped TAD interrupt sources used for extended-register selection. */
#define L2C_TAD_INT_TAG		(L2C_TAD_INT_TAGDBE)

#define L2C_TAD_INT_RTG		(L2C_TAD_INT_RTGDBE)

#define L2C_TAD_INT_DISLMC	(L2C_TAD_INT_WRDISLMC | L2C_TAD_INT_RDDISLMC)

#define L2C_TAD_INT_DISOCI	(L2C_TAD_INT_WRDISOCI | L2C_TAD_INT_RDDISOCI)

#define L2C_TAD_INT_ENA_ALL	(L2C_TAD_INT_ECC | L2C_TAD_INT_TAG | \
				 L2C_TAD_INT_RTG | \
				 L2C_TAD_INT_DISLMC | L2C_TAD_INT_DISOCI | \
				 L2C_TAD_INT_LFBTO)

/* TAD extended-error CSRs, captured alongside the interrupt status. */
#define L2C_TAD_TIMETWO		0x50000
#define L2C_TAD_TIMEOUT		0x50100
#define L2C_TAD_ERR		0x60000
#define L2C_TAD_TQD_ERR		0x60100
#define L2C_TAD_TTG_ERR		0x60200

/* L2C CBC interrupt CSR and bit assignments. */
#define L2C_CBC_INT_W1C		0x60000

#define L2C_CBC_INT_RSDSBE	 BIT(0)
#define L2C_CBC_INT_RSDDBE	 BIT(1)

#define L2C_CBC_INT_RSD		 (L2C_CBC_INT_RSDSBE | L2C_CBC_INT_RSDDBE)

#define L2C_CBC_INT_MIBSBE	 BIT(4)
#define L2C_CBC_INT_MIBDBE	 BIT(5)

#define L2C_CBC_INT_MIB		 (L2C_CBC_INT_MIBSBE | L2C_CBC_INT_MIBDBE)

#define L2C_CBC_INT_IORDDISOCI	 BIT(6)
#define L2C_CBC_INT_IOWRDISOCI	 BIT(7)

#define L2C_CBC_INT_IODISOCI	 (L2C_CBC_INT_IORDDISOCI | \
				  L2C_CBC_INT_IOWRDISOCI)

/* CBC corrected/uncorrected summary masks. */
#define L2C_CBC_INT_CE		 (L2C_CBC_INT_RSDSBE | L2C_CBC_INT_MIBSBE)
#define L2C_CBC_INT_UE		 (L2C_CBC_INT_RSDDBE | L2C_CBC_INT_MIBDBE)
/* Decode table for L2C_CBC_INT bits; terminated by an all-zero sentinel. */
static const struct error_descr l2_cbc_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = L2C_CBC_INT_RSDSBE,
		.descr = "RSD single-bit error",
	},
	{
		.type  = ERR_CORRECTED,
		.mask  = L2C_CBC_INT_MIBSBE,
		.descr = "MIB single-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_CBC_INT_RSDDBE,
		.descr = "RSD double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_CBC_INT_MIBDBE,
		.descr = "MIB double-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_CBC_INT_IORDDISOCI,
		.descr = "Read from a disabled CCPI",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_CBC_INT_IOWRDISOCI,
		.descr = "Write to a disabled CCPI",
	},
	{0, 0, NULL},
};
/* Remaining CBC CSRs: W1S, interrupt enables and extended-error registers. */
#define L2C_CBC_INT_W1S		0x60008
#define L2C_CBC_INT_ENA_W1C	0x60020

#define L2C_CBC_INT_ENA_ALL	 (L2C_CBC_INT_RSD | L2C_CBC_INT_MIB | \
				  L2C_CBC_INT_IODISOCI)

#define L2C_CBC_INT_ENA_W1S	0x60028

#define L2C_CBC_IODISOCIERR	0x80008
#define L2C_CBC_IOCERR		0x80010
#define L2C_CBC_RSDERR		0x80018
#define L2C_CBC_MIBERR		0x80020

/* L2C MCI interrupt CSR and bit assignments (VBF single/double-bit). */
#define L2C_MCI_INT_W1C		0x0

#define L2C_MCI_INT_VBFSBE	 BIT(0)
#define L2C_MCI_INT_VBFDBE	 BIT(1)
/* Decode table for L2C_MCI_INT bits; terminated by an all-zero sentinel. */
static const struct error_descr l2_mci_errors[] = {
	{
		.type  = ERR_CORRECTED,
		.mask  = L2C_MCI_INT_VBFSBE,
		.descr = "VBF single-bit error",
	},
	{
		.type  = ERR_UNCORRECTED,
		.mask  = L2C_MCI_INT_VBFDBE,
		.descr = "VBF double-bit error",
	},
	{0, 0, NULL},
};
/* Remaining MCI CSRs plus the message scratch-buffer sizes. */
#define L2C_MCI_INT_W1S		0x8
#define L2C_MCI_INT_ENA_W1C	0x20

#define L2C_MCI_INT_ENA_ALL	(L2C_MCI_INT_VBFSBE | L2C_MCI_INT_VBFDBE)

#define L2C_MCI_INT_ENA_W1S	0x28

#define L2C_MCI_ERR		0x10000

#define L2C_MESSAGE_SIZE	SZ_1K
#define L2C_OTHER_SIZE		(50 * ARRAY_SIZE(l2_tad_errors))
/* One logged L2C error: interrupt status plus one extended-error CSR. */
struct l2c_err_ctx {
	char *reg_ext_name;	/* name of the captured extended register */
	u64  reg_int;		/* L2C_*_INT value at interrupt time */
	u64  reg_ext;		/* matching extended-error register value */
};
/*
 * Per-device L2C state, shared by the TAD, CBC and MCI sub-drivers.
 * The hard IRQ handler pushes at ring_head; the threaded handler drains
 * from ring_tail.
 */
struct thunderx_l2c {
	void __iomem *regs;
	struct pci_dev *pdev;
	struct edac_device_ctl_info *edac_dev;

	struct dentry *debugfs;

	int index;		/* EDAC device index */

	struct msix_entry msix_ent;

	struct l2c_err_ctx err_ctx[RING_ENTRIES];
	unsigned long ring_head;
	unsigned long ring_tail;
};
  1423. static irqreturn_t thunderx_l2c_tad_isr(int irq, void *irq_id)
  1424. {
  1425. struct msix_entry *msix = irq_id;
  1426. struct thunderx_l2c *tad = container_of(msix, struct thunderx_l2c,
  1427. msix_ent);
  1428. unsigned long head = ring_pos(tad->ring_head, ARRAY_SIZE(tad->err_ctx));
  1429. struct l2c_err_ctx *ctx = &tad->err_ctx[head];
  1430. ctx->reg_int = readq(tad->regs + L2C_TAD_INT_W1C);
  1431. if (ctx->reg_int & L2C_TAD_INT_ECC) {
  1432. ctx->reg_ext_name = "TQD_ERR";
  1433. ctx->reg_ext = readq(tad->regs + L2C_TAD_TQD_ERR);
  1434. } else if (ctx->reg_int & L2C_TAD_INT_TAG) {
  1435. ctx->reg_ext_name = "TTG_ERR";
  1436. ctx->reg_ext = readq(tad->regs + L2C_TAD_TTG_ERR);
  1437. } else if (ctx->reg_int & L2C_TAD_INT_LFBTO) {
  1438. ctx->reg_ext_name = "TIMEOUT";
  1439. ctx->reg_ext = readq(tad->regs + L2C_TAD_TIMEOUT);
  1440. } else if (ctx->reg_int & L2C_TAD_INT_DISOCI) {
  1441. ctx->reg_ext_name = "ERR";
  1442. ctx->reg_ext = readq(tad->regs + L2C_TAD_ERR);
  1443. }
  1444. writeq(ctx->reg_int, tad->regs + L2C_TAD_INT_W1C);
  1445. tad->ring_head++;
  1446. return IRQ_WAKE_THREAD;
  1447. }
  1448. static irqreturn_t thunderx_l2c_cbc_isr(int irq, void *irq_id)
  1449. {
  1450. struct msix_entry *msix = irq_id;
  1451. struct thunderx_l2c *cbc = container_of(msix, struct thunderx_l2c,
  1452. msix_ent);
  1453. unsigned long head = ring_pos(cbc->ring_head, ARRAY_SIZE(cbc->err_ctx));
  1454. struct l2c_err_ctx *ctx = &cbc->err_ctx[head];
  1455. ctx->reg_int = readq(cbc->regs + L2C_CBC_INT_W1C);
  1456. if (ctx->reg_int & L2C_CBC_INT_RSD) {
  1457. ctx->reg_ext_name = "RSDERR";
  1458. ctx->reg_ext = readq(cbc->regs + L2C_CBC_RSDERR);
  1459. } else if (ctx->reg_int & L2C_CBC_INT_MIB) {
  1460. ctx->reg_ext_name = "MIBERR";
  1461. ctx->reg_ext = readq(cbc->regs + L2C_CBC_MIBERR);
  1462. } else if (ctx->reg_int & L2C_CBC_INT_IODISOCI) {
  1463. ctx->reg_ext_name = "IODISOCIERR";
  1464. ctx->reg_ext = readq(cbc->regs + L2C_CBC_IODISOCIERR);
  1465. }
  1466. writeq(ctx->reg_int, cbc->regs + L2C_CBC_INT_W1C);
  1467. cbc->ring_head++;
  1468. return IRQ_WAKE_THREAD;
  1469. }
  1470. static irqreturn_t thunderx_l2c_mci_isr(int irq, void *irq_id)
  1471. {
  1472. struct msix_entry *msix = irq_id;
  1473. struct thunderx_l2c *mci = container_of(msix, struct thunderx_l2c,
  1474. msix_ent);
  1475. unsigned long head = ring_pos(mci->ring_head, ARRAY_SIZE(mci->err_ctx));
  1476. struct l2c_err_ctx *ctx = &mci->err_ctx[head];
  1477. ctx->reg_int = readq(mci->regs + L2C_MCI_INT_W1C);
  1478. ctx->reg_ext = readq(mci->regs + L2C_MCI_ERR);
  1479. writeq(ctx->reg_int, mci->regs + L2C_MCI_INT_W1C);
  1480. ctx->reg_ext_name = "ERR";
  1481. mci->ring_head++;
  1482. return IRQ_WAKE_THREAD;
  1483. }
  1484. static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
  1485. {
  1486. struct msix_entry *msix = irq_id;
  1487. struct thunderx_l2c *l2c = container_of(msix, struct thunderx_l2c,
  1488. msix_ent);
  1489. unsigned long tail = ring_pos(l2c->ring_tail, ARRAY_SIZE(l2c->err_ctx));
  1490. struct l2c_err_ctx *ctx = &l2c->err_ctx[tail];
  1491. irqreturn_t ret = IRQ_NONE;
  1492. u64 mask_ue, mask_ce;
  1493. const struct error_descr *l2_errors;
  1494. char *reg_int_name;
  1495. char *msg;
  1496. char *other;
  1497. msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
  1498. other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
  1499. if (!msg || !other)
  1500. goto err_free;
  1501. switch (l2c->pdev->device) {
  1502. case PCI_DEVICE_ID_THUNDER_L2C_TAD:
  1503. reg_int_name = "L2C_TAD_INT";
  1504. mask_ue = L2C_TAD_INT_UE;
  1505. mask_ce = L2C_TAD_INT_CE;
  1506. l2_errors = l2_tad_errors;
  1507. break;
  1508. case PCI_DEVICE_ID_THUNDER_L2C_CBC:
  1509. reg_int_name = "L2C_CBC_INT";
  1510. mask_ue = L2C_CBC_INT_UE;
  1511. mask_ce = L2C_CBC_INT_CE;
  1512. l2_errors = l2_cbc_errors;
  1513. break;
  1514. case PCI_DEVICE_ID_THUNDER_L2C_MCI:
  1515. reg_int_name = "L2C_MCI_INT";
  1516. mask_ue = L2C_MCI_INT_VBFDBE;
  1517. mask_ce = L2C_MCI_INT_VBFSBE;
  1518. l2_errors = l2_mci_errors;
  1519. break;
  1520. default:
  1521. dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n",
  1522. l2c->pdev->device);
  1523. return IRQ_NONE;
  1524. }
  1525. while (CIRC_CNT(l2c->ring_head, l2c->ring_tail,
  1526. ARRAY_SIZE(l2c->err_ctx))) {
  1527. snprintf(msg, L2C_MESSAGE_SIZE,
  1528. "%s: %s: %016llx, %s: %016llx",
  1529. l2c->edac_dev->ctl_name, reg_int_name, ctx->reg_int,
  1530. ctx->reg_ext_name, ctx->reg_ext);
  1531. decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
  1532. strncat(msg, other, L2C_MESSAGE_SIZE);
  1533. if (ctx->reg_int & mask_ue)
  1534. edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
  1535. else if (ctx->reg_int & mask_ce)
  1536. edac_device_handle_ce(l2c->edac_dev, 0, 0, msg);
  1537. l2c->ring_tail++;
  1538. }
  1539. return IRQ_HANDLED;
  1540. err_free:
  1541. kfree(other);
  1542. kfree(msg);
  1543. return ret;
  1544. }
/* Per-sub-block debugfs nodes exposing the W1S registers for injection. */
#define L2C_DEBUGFS_ATTR(_name, _reg)	DEBUGFS_REG_ATTR(l2c, _name, _reg)

L2C_DEBUGFS_ATTR(tad_int, L2C_TAD_INT_W1S);

struct debugfs_entry *l2c_tad_dfs_ents[] = {
	&debugfs_tad_int,
};

L2C_DEBUGFS_ATTR(cbc_int, L2C_CBC_INT_W1S);

struct debugfs_entry *l2c_cbc_dfs_ents[] = {
	&debugfs_cbc_int,
};

L2C_DEBUGFS_ATTR(mci_int, L2C_MCI_INT_W1S);

struct debugfs_entry *l2c_mci_dfs_ents[] = {
	&debugfs_mci_int,
};
/* PCI IDs this driver binds to: the TAD, CBC and MCI L2C sub-blocks. */
static const struct pci_device_id thunderx_l2c_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_TAD), },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_CBC), },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_MCI), },
	{ 0, },
};
/*
 * Probe one L2C error-reporting PCI function (TAD, CBC or MCI flavor,
 * selected by PCI device ID): map its registers, hook up the MSI-X
 * error interrupt, register an EDAC device, then unmask the hardware
 * error interrupts as the final step.
 *
 * Returns 0 on success or a negative errno.
 */
static int thunderx_l2c_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct thunderx_l2c *l2c;
	struct edac_device_ctl_info *edac_dev;
	struct debugfs_entry **l2c_devattr;
	size_t dfs_entries;
	irqreturn_t (*thunderx_l2c_isr)(int, void *) = NULL;
	char name[32];
	const char *fmt;
	u64 reg_en_offs, reg_en_mask;
	int idx;
	int ret;

	/* Managed enable/iomap: undone automatically on driver detach. */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_l2c");
	if (ret) {
		dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
		return ret;
	}

	/* Select the per-flavor ISR, debugfs table, EDAC name format and
	 * interrupt-enable (W1S) register/mask based on which L2C
	 * function this PCI device represents.
	 */
	switch (pdev->device) {
	case PCI_DEVICE_ID_THUNDER_L2C_TAD:
		thunderx_l2c_isr = thunderx_l2c_tad_isr;
		l2c_devattr = l2c_tad_dfs_ents;
		dfs_entries = ARRAY_SIZE(l2c_tad_dfs_ents);
		fmt = "L2C-TAD%d";
		reg_en_offs = L2C_TAD_INT_ENA_W1S;
		reg_en_mask = L2C_TAD_INT_ENA_ALL;
		break;
	case PCI_DEVICE_ID_THUNDER_L2C_CBC:
		thunderx_l2c_isr = thunderx_l2c_cbc_isr;
		l2c_devattr = l2c_cbc_dfs_ents;
		dfs_entries = ARRAY_SIZE(l2c_cbc_dfs_ents);
		fmt = "L2C-CBC%d";
		reg_en_offs = L2C_CBC_INT_ENA_W1S;
		reg_en_mask = L2C_CBC_INT_ENA_ALL;
		break;
	case PCI_DEVICE_ID_THUNDER_L2C_MCI:
		thunderx_l2c_isr = thunderx_l2c_mci_isr;
		l2c_devattr = l2c_mci_dfs_ents;
		dfs_entries = ARRAY_SIZE(l2c_mci_dfs_ents);
		fmt = "L2C-MCI%d";
		reg_en_offs = L2C_MCI_INT_ENA_W1S;
		reg_en_mask = L2C_MCI_INT_ENA_ALL;
		break;
	default:
		/* Unreachable: the id_table only matches the three IDs above. */
		//Should never ever get here
		dev_err(&pdev->dev, "Unsupported PCI device: %04x\n",
			pdev->device);
		return -EINVAL;
	}

	idx = edac_device_alloc_index();
	snprintf(name, sizeof(name), fmt, idx);

	/* Private data (struct thunderx_l2c) is allocated inline with the
	 * control info and reachable via edac_dev->pvt_info.
	 */
	edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_l2c),
					      name, 1, "L2C", 1, 0,
					      NULL, 0, idx);
	if (!edac_dev) {
		dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
		return -ENOMEM;
	}

	l2c = edac_dev->pvt_info;
	l2c->edac_dev = edac_dev;

	l2c->regs = pcim_iomap_table(pdev)[0];
	if (!l2c->regs) {
		dev_err(&pdev->dev, "Cannot map PCI resources\n");
		ret = -ENODEV;
		goto err_free;
	}

	l2c->pdev = pdev;

	/* Empty error-context ring shared between hard IRQ and threaded ISR. */
	l2c->ring_head = 0;
	l2c->ring_tail = 0;

	/* Single MSI-X vector for error reporting. */
	l2c->msix_ent.entry = 0;
	l2c->msix_ent.vector = 0;

	ret = pci_enable_msix_exact(pdev, &l2c->msix_ent, 1);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
		goto err_free;
	}

	/* Hard IRQ captures registers; the threaded half decodes/reports. */
	ret = devm_request_threaded_irq(&pdev->dev, l2c->msix_ent.vector,
					thunderx_l2c_isr,
					thunderx_l2c_threaded_isr,
					0, "[EDAC] ThunderX L2C",
					&l2c->msix_ent);
	if (ret)
		goto err_free;

	edac_dev->dev = &pdev->dev;
	edac_dev->dev_name = dev_name(&pdev->dev);
	edac_dev->mod_name = "thunderx-l2c";
	edac_dev->ctl_name = "thunderx-l2c";

	ret = edac_device_add_device(edac_dev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot add EDAC device: %d\n", ret);
		goto err_free;
	}

	if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
		l2c->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);

		/* Returns the number of nodes created; a shortfall is only
		 * warned about, it does not fail the probe.
		 */
		ret = thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr,
						    l2c, dfs_entries);

		if (ret != dfs_entries) {
			dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
				 ret, ret >= 0 ? " created" : "");
		}
	}

	pci_set_drvdata(pdev, edac_dev);

	/* Everything is wired up: unmask the error interrupts (W1S). */
	writeq(reg_en_mask, l2c->regs + reg_en_offs);

	return 0;

err_free:
	edac_device_free_ctl_info(edac_dev);

	return ret;
}
/*
 * Teardown counterpart of thunderx_l2c_probe(): mask the hardware error
 * interrupts first (W1C registers), then remove the debugfs tree and
 * unregister/free the EDAC device. IRQ/MSI-X/iomap are device-managed
 * and released automatically after this returns.
 */
static void thunderx_l2c_remove(struct pci_dev *pdev)
{
	struct edac_device_ctl_info *edac_dev = pci_get_drvdata(pdev);
	struct thunderx_l2c *l2c = edac_dev->pvt_info;

	/* Disable interrupts via the flavor-specific write-1-to-clear mask. */
	switch (pdev->device) {
	case PCI_DEVICE_ID_THUNDER_L2C_TAD:
		writeq(L2C_TAD_INT_ENA_ALL, l2c->regs + L2C_TAD_INT_ENA_W1C);
		break;
	case PCI_DEVICE_ID_THUNDER_L2C_CBC:
		writeq(L2C_CBC_INT_ENA_ALL, l2c->regs + L2C_CBC_INT_ENA_W1C);
		break;
	case PCI_DEVICE_ID_THUNDER_L2C_MCI:
		writeq(L2C_MCI_INT_ENA_ALL, l2c->regs + L2C_MCI_INT_ENA_W1C);
		break;
	}

	edac_debugfs_remove_recursive(l2c->debugfs);

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);
}
MODULE_DEVICE_TABLE(pci, thunderx_l2c_pci_tbl);

/* PCI driver covering all three L2C error-reporting functions. */
static struct pci_driver thunderx_l2c_driver = {
	.name     = "thunderx_l2c_edac",
	.probe    = thunderx_l2c_probe,
	.remove   = thunderx_l2c_remove,
	.id_table = thunderx_l2c_pci_tbl,
};
  1703. static int __init thunderx_edac_init(void)
  1704. {
  1705. int rc = 0;
  1706. rc = pci_register_driver(&thunderx_lmc_driver);
  1707. if (rc)
  1708. return rc;
  1709. rc = pci_register_driver(&thunderx_ocx_driver);
  1710. if (rc)
  1711. goto err_lmc;
  1712. rc = pci_register_driver(&thunderx_l2c_driver);
  1713. if (rc)
  1714. goto err_ocx;
  1715. return rc;
  1716. err_ocx:
  1717. pci_unregister_driver(&thunderx_ocx_driver);
  1718. err_lmc:
  1719. pci_unregister_driver(&thunderx_lmc_driver);
  1720. return rc;
  1721. }
/* Unregister the sub-drivers in the reverse order of registration. */
static void __exit thunderx_edac_exit(void)
{
	pci_unregister_driver(&thunderx_l2c_driver);
	pci_unregister_driver(&thunderx_ocx_driver);
	pci_unregister_driver(&thunderx_lmc_driver);
}

module_init(thunderx_edac_init);
module_exit(thunderx_edac_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Cavium, Inc.");
MODULE_DESCRIPTION("EDAC Driver for Cavium ThunderX");