  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __NITROX_CSR_H
  3. #define __NITROX_CSR_H
  4. #include <asm/byteorder.h>
  5. #include <linux/types.h>
  6. /* EMU clusters */
  7. #define NR_CLUSTERS 4
  8. /* Maximum cores per cluster,
  9. * varies based on partname
  10. */
  11. #define AE_CORES_PER_CLUSTER 20
  12. #define SE_CORES_PER_CLUSTER 16
  13. #define AE_MAX_CORES (AE_CORES_PER_CLUSTER * NR_CLUSTERS)
  14. #define SE_MAX_CORES (SE_CORES_PER_CLUSTER * NR_CLUSTERS)
  15. #define ZIP_MAX_CORES 5
  16. /* BIST registers */
  17. #define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000))
  18. #define UCD_BIST_STATUS 0x12C0070
  19. #define NPS_CORE_BIST_REG 0x10000E8
  20. #define NPS_CORE_NPC_BIST_REG 0x1000128
  21. #define NPS_PKT_SLC_BIST_REG 0x1040088
  22. #define NPS_PKT_IN_BIST_REG 0x1040100
  23. #define POM_BIST_REG 0x11C0100
  24. #define BMI_BIST_REG 0x1140080
  25. #define EFL_CORE_BIST_REGX(_i) (0x1240100 + ((_i) * 0x400))
  26. #define EFL_TOP_BIST_STAT 0x1241090
  27. #define BMO_BIST_REG 0x1180080
  28. #define LBC_BIST_STATUS 0x1200020
  29. #define PEM_BIST_STATUSX(_i) (0x1080468 | ((_i) << 18))
  30. /* EMU registers */
  31. #define EMU_SE_ENABLEX(_i) (0x1400000 + ((_i) * 0x40000))
  32. #define EMU_AE_ENABLEX(_i) (0x1400008 + ((_i) * 0x40000))
  33. #define EMU_WD_INT_ENA_W1SX(_i) (0x1402318 + ((_i) * 0x40000))
  34. #define EMU_GE_INT_ENA_W1SX(_i) (0x1402518 + ((_i) * 0x40000))
  35. #define EMU_FUSE_MAPX(_i) (0x1402708 + ((_i) * 0x40000))
  36. /* UCD registers */
  37. #define UCD_UCODE_LOAD_BLOCK_NUM 0x12C0010
  38. #define UCD_UCODE_LOAD_IDX_DATAX(_i) (0x12C0018 + ((_i) * 0x20))
  39. #define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000))
  40. /* NPS core registers */
  41. #define NPS_CORE_GBL_VFCFG 0x1000000
  42. #define NPS_CORE_CONTROL 0x1000008
  43. #define NPS_CORE_INT_ACTIVE 0x1000080
  44. #define NPS_CORE_INT 0x10000A0
  45. #define NPS_CORE_INT_ENA_W1S 0x10000B8
  46. #define NPS_STATS_PKT_DMA_RD_CNT 0x1000180
  47. #define NPS_STATS_PKT_DMA_WR_CNT 0x1000190
  48. /* NPS packet registers */
  49. #define NPS_PKT_INT 0x1040018
  50. #define NPS_PKT_IN_RERR_HI 0x1040108
  51. #define NPS_PKT_IN_RERR_HI_ENA_W1S 0x1040120
  52. #define NPS_PKT_IN_RERR_LO 0x1040128
  53. #define NPS_PKT_IN_RERR_LO_ENA_W1S 0x1040140
  54. #define NPS_PKT_IN_ERR_TYPE 0x1040148
  55. #define NPS_PKT_IN_ERR_TYPE_ENA_W1S 0x1040160
  56. #define NPS_PKT_IN_INSTR_CTLX(_i) (0x10060 + ((_i) * 0x40000))
  57. #define NPS_PKT_IN_INSTR_BADDRX(_i) (0x10068 + ((_i) * 0x40000))
  58. #define NPS_PKT_IN_INSTR_RSIZEX(_i) (0x10070 + ((_i) * 0x40000))
  59. #define NPS_PKT_IN_DONE_CNTSX(_i) (0x10080 + ((_i) * 0x40000))
  60. #define NPS_PKT_IN_INSTR_BAOFF_DBELLX(_i) (0x10078 + ((_i) * 0x40000))
  61. #define NPS_PKT_IN_INT_LEVELSX(_i) (0x10088 + ((_i) * 0x40000))
  62. #define NPS_PKT_SLC_RERR_HI 0x1040208
  63. #define NPS_PKT_SLC_RERR_HI_ENA_W1S 0x1040220
  64. #define NPS_PKT_SLC_RERR_LO 0x1040228
  65. #define NPS_PKT_SLC_RERR_LO_ENA_W1S 0x1040240
  66. #define NPS_PKT_SLC_ERR_TYPE 0x1040248
  67. #define NPS_PKT_SLC_ERR_TYPE_ENA_W1S 0x1040260
  68. #define NPS_PKT_SLC_CTLX(_i) (0x10000 + ((_i) * 0x40000))
  69. #define NPS_PKT_SLC_CNTSX(_i) (0x10008 + ((_i) * 0x40000))
  70. #define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010 + ((_i) * 0x40000))
  71. /* POM registers */
  72. #define POM_INT_ENA_W1S 0x11C0018
  73. #define POM_GRP_EXECMASKX(_i) (0x11C1100 | ((_i) * 8))
  74. #define POM_INT 0x11C0000
  75. #define POM_PERF_CTL 0x11CC400
  76. /* BMI registers */
  77. #define BMI_INT 0x1140000
  78. #define BMI_CTL 0x1140020
  79. #define BMI_INT_ENA_W1S 0x1140018
  80. #define BMI_NPS_PKT_CNT 0x1140070
  81. /* EFL registers */
  82. #define EFL_CORE_INT_ENA_W1SX(_i) (0x1240018 + ((_i) * 0x400))
  83. #define EFL_CORE_VF_ERR_INT0X(_i) (0x1240050 + ((_i) * 0x400))
  84. #define EFL_CORE_VF_ERR_INT0_ENA_W1SX(_i) (0x1240068 + ((_i) * 0x400))
  85. #define EFL_CORE_VF_ERR_INT1X(_i) (0x1240070 + ((_i) * 0x400))
  86. #define EFL_CORE_VF_ERR_INT1_ENA_W1SX(_i) (0x1240088 + ((_i) * 0x400))
  87. #define EFL_CORE_SE_ERR_INTX(_i) (0x12400A0 + ((_i) * 0x400))
  88. #define EFL_RNM_CTL_STATUS 0x1241800
  89. #define EFL_CORE_INTX(_i) (0x1240000 + ((_i) * 0x400))
  90. /* BMO registers */
  91. #define BMO_CTL2 0x1180028
  92. #define BMO_NPS_SLC_PKT_CNT 0x1180078
  93. /* LBC registers */
  94. #define LBC_INT 0x1200000
  95. #define LBC_INVAL_CTL 0x1201010
  96. #define LBC_PLM_VF1_64_INT 0x1202008
  97. #define LBC_INVAL_STATUS 0x1202010
  98. #define LBC_INT_ENA_W1S 0x1203000
  99. #define LBC_PLM_VF1_64_INT_ENA_W1S 0x1205008
  100. #define LBC_PLM_VF65_128_INT 0x1206008
  101. #define LBC_ELM_VF1_64_INT 0x1208000
  102. #define LBC_PLM_VF65_128_INT_ENA_W1S 0x1209008
  103. #define LBC_ELM_VF1_64_INT_ENA_W1S 0x120B000
  104. #define LBC_ELM_VF65_128_INT 0x120C000
  105. #define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000
  106. #define RST_BOOT 0x10C1600
  107. #define FUS_DAT1 0x10C1408
  108. /* PEM registers */
  109. #define PEM0_INT 0x1080428
  110. /**
  111. * struct emu_fuse_map - EMU Fuse Map Registers
  112. * @ae_fuse: Fuse settings for AE 19..0
  113. * @se_fuse: Fuse settings for SE 15..0
  114. *
  115. * A set bit indicates the unit is fuse disabled.
  116. */
  117. union emu_fuse_map {
  118. u64 value;
  119. struct {
  120. #if (defined(__BIG_ENDIAN_BITFIELD))
  121. u64 valid : 1;
  122. u64 raz_52_62 : 11;
  123. u64 ae_fuse : 20;
  124. u64 raz_16_31 : 16;
  125. u64 se_fuse : 16;
  126. #else
  127. u64 se_fuse : 16;
  128. u64 raz_16_31 : 16;
  129. u64 ae_fuse : 20;
  130. u64 raz_52_62 : 11;
  131. u64 valid : 1;
  132. #endif
  133. } s;
  134. };
  135. /**
  136. * struct emu_se_enable - Symmetric Engine Enable Registers
  137. * @enable: Individual enables for each of the clusters
  138. * 16 symmetric engines.
  139. */
  140. union emu_se_enable {
  141. u64 value;
  142. struct {
  143. #if (defined(__BIG_ENDIAN_BITFIELD))
  144. u64 raz : 48;
  145. u64 enable : 16;
  146. #else
  147. u64 enable : 16;
  148. u64 raz : 48;
  149. #endif
  150. } s;
  151. };
  152. /**
  153. * struct emu_ae_enable - EMU Asymmetric engines.
  154. * @enable: Individual enables for each of the cluster's
  155. * 20 Asymmetric Engines.
  156. */
  157. union emu_ae_enable {
  158. u64 value;
  159. struct {
  160. #if (defined(__BIG_ENDIAN_BITFIELD))
  161. u64 raz : 44;
  162. u64 enable : 20;
  163. #else
  164. u64 enable : 20;
  165. u64 raz : 44;
  166. #endif
  167. } s;
  168. };
  169. /**
  170. * struct emu_wd_int_ena_w1s - EMU Interrupt Enable Registers
  171. * @ae_wd: Reads or sets enable for EMU(0..3)_WD_INT[AE_WD]
  172. * @se_wd: Reads or sets enable for EMU(0..3)_WD_INT[SE_WD]
  173. */
  174. union emu_wd_int_ena_w1s {
  175. u64 value;
  176. struct {
  177. #if (defined(__BIG_ENDIAN_BITFIELD))
  178. u64 raz2 : 12;
  179. u64 ae_wd : 20;
  180. u64 raz1 : 16;
  181. u64 se_wd : 16;
  182. #else
  183. u64 se_wd : 16;
  184. u64 raz1 : 16;
  185. u64 ae_wd : 20;
  186. u64 raz2 : 12;
  187. #endif
  188. } s;
  189. };
  190. /**
  191. * struct emu_ge_int_ena_w1s - EMU Interrupt Enable set registers
  192. * @ae_ge: Reads or sets enable for EMU(0..3)_GE_INT[AE_GE]
  193. * @se_ge: Reads or sets enable for EMU(0..3)_GE_INT[SE_GE]
  194. */
  195. union emu_ge_int_ena_w1s {
  196. u64 value;
  197. struct {
  198. #if (defined(__BIG_ENDIAN_BITFIELD))
  199. u64 raz_52_63 : 12;
  200. u64 ae_ge : 20;
  201. u64 raz_16_31: 16;
  202. u64 se_ge : 16;
  203. #else
  204. u64 se_ge : 16;
  205. u64 raz_16_31: 16;
  206. u64 ae_ge : 20;
  207. u64 raz_52_63 : 12;
  208. #endif
  209. } s;
  210. };
  211. /**
  212. * struct nps_pkt_slc_ctl - Solicited Packet Out Control Registers
  213. * @rh: Indicates whether to remove or include the response header
  214. * 1 = Include, 0 = Remove
  215. * @z: If set, 8 trailing 0x00 bytes will be added to the end of the
  216. * outgoing packet.
  217. * @enb: Enable for this port.
  218. */
  219. union nps_pkt_slc_ctl {
  220. u64 value;
  221. struct {
  222. #if defined(__BIG_ENDIAN_BITFIELD)
  223. u64 raz : 61;
  224. u64 rh : 1;
  225. u64 z : 1;
  226. u64 enb : 1;
  227. #else
  228. u64 enb : 1;
  229. u64 z : 1;
  230. u64 rh : 1;
  231. u64 raz : 61;
  232. #endif
  233. } s;
  234. };
  235. /**
  236. * struct nps_pkt_slc_cnts - Solicited Packet Out Count Registers
  237. * @slc_int: Returns a 1 when:
  238. * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or
  239. * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET].
  240. * To clear the bit, the CNTS register must be written to clear.
  241. * @in_int: Returns a 1 when:
  242. * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT].
  243. * To clear the bit, the DONE_CNTS register must be written to clear.
  244. * @mbox_int: Returns a 1 when:
  245. * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set. To clear the bit,
  246. * write NPS_PKT_MBOX_PF_VF(i)_INT[INTR] with 1.
  247. * @timer: Timer, incremented every 2048 coprocessor clock cycles
  248. * when [CNT] is not zero. The hardware clears both [TIMER] and
  249. * [INT] when [CNT] goes to 0.
  250. * @cnt: Packet counter. Hardware adds to [CNT] as it sends packets out.
  251. * On a write to this CSR, hardware subtracts the amount written to the
  252. * [CNT] field from [CNT].
  253. */
  254. union nps_pkt_slc_cnts {
  255. u64 value;
  256. struct {
  257. #if defined(__BIG_ENDIAN_BITFIELD)
  258. u64 slc_int : 1;
  259. u64 uns_int : 1;
  260. u64 in_int : 1;
  261. u64 mbox_int : 1;
  262. u64 resend : 1;
  263. u64 raz : 5;
  264. u64 timer : 22;
  265. u64 cnt : 32;
  266. #else
  267. u64 cnt : 32;
  268. u64 timer : 22;
  269. u64 raz : 5;
  270. u64 resend : 1;
  271. u64 mbox_int : 1;
  272. u64 in_int : 1;
  273. u64 uns_int : 1;
  274. u64 slc_int : 1;
  275. #endif
  276. } s;
  277. };
  278. /**
  279. * struct nps_pkt_slc_int_levels - Solicited Packet Out Interrupt Levels
  280. * Registers.
  281. * @bmode: Determines whether NPS_PKT_SLC_CNTS[CNT] is a byte or
  282. * packet counter.
  283. * @timet: Output port counter time interrupt threshold.
  284. * @cnt: Output port counter interrupt threshold.
  285. */
  286. union nps_pkt_slc_int_levels {
  287. u64 value;
  288. struct {
  289. #if defined(__BIG_ENDIAN_BITFIELD)
  290. u64 bmode : 1;
  291. u64 raz : 9;
  292. u64 timet : 22;
  293. u64 cnt : 32;
  294. #else
  295. u64 cnt : 32;
  296. u64 timet : 22;
  297. u64 raz : 9;
  298. u64 bmode : 1;
  299. #endif
  300. } s;
  301. };
  302. /**
  303. * struct nps_pkt_inst - NPS Packet Interrupt Register
  304. * @in_err: Set when any NPS_PKT_IN_RERR_HI/LO bit and
  305. * corresponding NPS_PKT_IN_RERR_*_ENA_* bit are bot set.
  306. * @uns_err: Set when any NSP_PKT_UNS_RERR_HI/LO bit and
  307. * corresponding NPS_PKT_UNS_RERR_*_ENA_* bit are both set.
  308. * @slc_er: Set when any NSP_PKT_SLC_RERR_HI/LO bit and
  309. * corresponding NPS_PKT_SLC_RERR_*_ENA_* bit are both set.
  310. */
  311. union nps_pkt_int {
  312. u64 value;
  313. struct {
  314. #if defined(__BIG_ENDIAN_BITFIELD)
  315. u64 raz : 54;
  316. u64 uns_wto : 1;
  317. u64 in_err : 1;
  318. u64 uns_err : 1;
  319. u64 slc_err : 1;
  320. u64 in_dbe : 1;
  321. u64 in_sbe : 1;
  322. u64 uns_dbe : 1;
  323. u64 uns_sbe : 1;
  324. u64 slc_dbe : 1;
  325. u64 slc_sbe : 1;
  326. #else
  327. u64 slc_sbe : 1;
  328. u64 slc_dbe : 1;
  329. u64 uns_sbe : 1;
  330. u64 uns_dbe : 1;
  331. u64 in_sbe : 1;
  332. u64 in_dbe : 1;
  333. u64 slc_err : 1;
  334. u64 uns_err : 1;
  335. u64 in_err : 1;
  336. u64 uns_wto : 1;
  337. u64 raz : 54;
  338. #endif
  339. } s;
  340. };
  341. /**
  342. * struct nps_pkt_in_done_cnts - Input instruction ring counts registers
  343. * @slc_cnt: Returns a 1 when:
  344. * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or
  345. * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SCL(i)_INT_LEVELS[TIMET]
  346. * To clear the bit, the CNTS register must be
  347. * written to clear the underlying condition
  348. * @uns_int: Return a 1 when:
  349. * NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT], or
  350. * NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET]
  351. * To clear the bit, the CNTS register must be
  352. * written to clear the underlying condition
  353. * @in_int: Returns a 1 when:
  354. * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT]
  355. * To clear the bit, the DONE_CNTS register
  356. * must be written to clear the underlying condition
  357. * @mbox_int: Returns a 1 when:
  358. * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set.
  359. * To clear the bit, write NPS_PKT_MBOX_PF_VF(i)_INT[INTR]
  360. * with 1.
  361. * @resend: A write of 1 will resend an MSI-X interrupt message if any
  362. * of the following conditions are true for this ring "i".
  363. * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT]
  364. * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET]
  365. * NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT]
  366. * NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET]
  367. * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT]
  368. * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set
  369. * @cnt: Packet counter. Hardware adds to [CNT] as it reads
  370. * packets. On a write to this CSR, hardware substracts the
  371. * amount written to the [CNT] field from [CNT], which will
  372. * clear PKT_IN(i)_INT_STATUS[INTR] if [CNT] becomes <=
  373. * NPS_PKT_IN(i)_INT_LEVELS[CNT]. This register should be
  374. * cleared before enabling a ring by reading the current
  375. * value and writing it back.
  376. */
  377. union nps_pkt_in_done_cnts {
  378. u64 value;
  379. struct {
  380. #if defined(__BIG_ENDIAN_BITFIELD)
  381. u64 slc_int : 1;
  382. u64 uns_int : 1;
  383. u64 in_int : 1;
  384. u64 mbox_int : 1;
  385. u64 resend : 1;
  386. u64 raz : 27;
  387. u64 cnt : 32;
  388. #else
  389. u64 cnt : 32;
  390. u64 raz : 27;
  391. u64 resend : 1;
  392. u64 mbox_int : 1;
  393. u64 in_int : 1;
  394. u64 uns_int : 1;
  395. u64 slc_int : 1;
  396. #endif
  397. } s;
  398. };
  399. /**
  400. * struct nps_pkt_in_instr_ctl - Input Instruction Ring Control Registers.
  401. * @is64b: If 1, the ring uses 64-byte instructions. If 0, the
  402. * ring uses 32-byte instructions.
  403. * @enb: Enable for the input ring.
  404. */
  405. union nps_pkt_in_instr_ctl {
  406. u64 value;
  407. struct {
  408. #if (defined(__BIG_ENDIAN_BITFIELD))
  409. u64 raz : 62;
  410. u64 is64b : 1;
  411. u64 enb : 1;
  412. #else
  413. u64 enb : 1;
  414. u64 is64b : 1;
  415. u64 raz : 62;
  416. #endif
  417. } s;
  418. };
  419. /**
  420. * struct nps_pkt_in_instr_rsize - Input instruction ring size registers
  421. * @rsize: Ring size (number of instructions)
  422. */
  423. union nps_pkt_in_instr_rsize {
  424. u64 value;
  425. struct {
  426. #if (defined(__BIG_ENDIAN_BITFIELD))
  427. u64 raz : 32;
  428. u64 rsize : 32;
  429. #else
  430. u64 rsize : 32;
  431. u64 raz : 32;
  432. #endif
  433. } s;
  434. };
  435. /**
  436. * struct nps_pkt_in_instr_baoff_dbell - Input instruction ring
  437. * base address offset and doorbell registers
  438. * @aoff: Address offset. The offset from the NPS_PKT_IN_INSTR_BADDR
  439. * where the next pointer is read.
  440. * @dbell: Pointer list doorbell count. Write operations to this field
  441. * increments the present value here. Read operations return the
  442. * present value.
  443. */
  444. union nps_pkt_in_instr_baoff_dbell {
  445. u64 value;
  446. struct {
  447. #if (defined(__BIG_ENDIAN_BITFIELD))
  448. u64 aoff : 32;
  449. u64 dbell : 32;
  450. #else
  451. u64 dbell : 32;
  452. u64 aoff : 32;
  453. #endif
  454. } s;
  455. };
  456. /**
  457. * struct nps_core_int_ena_w1s - NPS core interrupt enable set register
  458. * @host_nps_wr_err: Reads or sets enable for
  459. * NPS_CORE_INT[HOST_NPS_WR_ERR].
  460. * @npco_dma_malform: Reads or sets enable for
  461. * NPS_CORE_INT[NPCO_DMA_MALFORM].
  462. * @exec_wr_timeout: Reads or sets enable for
  463. * NPS_CORE_INT[EXEC_WR_TIMEOUT].
  464. * @host_wr_timeout: Reads or sets enable for
  465. * NPS_CORE_INT[HOST_WR_TIMEOUT].
  466. * @host_wr_err: Reads or sets enable for
  467. * NPS_CORE_INT[HOST_WR_ERR]
  468. */
  469. union nps_core_int_ena_w1s {
  470. u64 value;
  471. struct {
  472. #if (defined(__BIG_ENDIAN_BITFIELD))
  473. u64 raz4 : 55;
  474. u64 host_nps_wr_err : 1;
  475. u64 npco_dma_malform : 1;
  476. u64 exec_wr_timeout : 1;
  477. u64 host_wr_timeout : 1;
  478. u64 host_wr_err : 1;
  479. u64 raz3 : 1;
  480. u64 raz2 : 1;
  481. u64 raz1 : 1;
  482. u64 raz0 : 1;
  483. #else
  484. u64 raz0 : 1;
  485. u64 raz1 : 1;
  486. u64 raz2 : 1;
  487. u64 raz3 : 1;
  488. u64 host_wr_err : 1;
  489. u64 host_wr_timeout : 1;
  490. u64 exec_wr_timeout : 1;
  491. u64 npco_dma_malform : 1;
  492. u64 host_nps_wr_err : 1;
  493. u64 raz4 : 55;
  494. #endif
  495. } s;
  496. };
  497. /**
  498. * struct nps_core_gbl_vfcfg - Global VF Configuration Register.
  499. * @ilk_disable: When set, this bit indicates that the ILK interface has
  500. * been disabled.
  501. * @obaf: BMO allocation control
  502. * 0 = allocate per queue
  503. * 1 = allocate per VF
  504. * @ibaf: BMI allocation control
  505. * 0 = allocate per queue
  506. * 1 = allocate per VF
  507. * @zaf: ZIP allocation control
  508. * 0 = allocate per queue
  509. * 1 = allocate per VF
  510. * @aeaf: AE allocation control
  511. * 0 = allocate per queue
  512. * 1 = allocate per VF
  513. * @seaf: SE allocation control
  514. * 0 = allocation per queue
  515. * 1 = allocate per VF
  516. * @cfg: VF/PF mode.
  517. */
  518. union nps_core_gbl_vfcfg {
  519. u64 value;
  520. struct {
  521. #if (defined(__BIG_ENDIAN_BITFIELD))
  522. u64 raz :55;
  523. u64 ilk_disable :1;
  524. u64 obaf :1;
  525. u64 ibaf :1;
  526. u64 zaf :1;
  527. u64 aeaf :1;
  528. u64 seaf :1;
  529. u64 cfg :3;
  530. #else
  531. u64 cfg :3;
  532. u64 seaf :1;
  533. u64 aeaf :1;
  534. u64 zaf :1;
  535. u64 ibaf :1;
  536. u64 obaf :1;
  537. u64 ilk_disable :1;
  538. u64 raz :55;
  539. #endif
  540. } s;
  541. };
  542. /**
  543. * struct nps_core_int_active - NPS Core Interrupt Active Register
  544. * @resend: Resend MSI-X interrupt if needs to handle interrupts
  545. * Sofware can set this bit and then exit the ISR.
  546. * @ocla: Set when any OCLA(0)_INT and corresponding OCLA(0_INT_ENA_W1C
  547. * bit are set
  548. * @mbox: Set when any NPS_PKT_MBOX_INT_LO/HI and corresponding
  549. * NPS_PKT_MBOX_INT_LO_ENA_W1C/HI_ENA_W1C bits are set
  550. * @emu: bit i is set in [EMU] when any EMU(i)_INT bit is set
  551. * @bmo: Set when any BMO_INT bit is set
  552. * @bmi: Set when any BMI_INT bit is set or when any non-RO
  553. * BMI_INT and corresponding BMI_INT_ENA_W1C bits are both set
  554. * @aqm: Set when any AQM_INT bit is set
  555. * @zqm: Set when any ZQM_INT bit is set
  556. * @efl: Set when any EFL_INT RO bit is set or when any non-RO EFL_INT
  557. * and corresponding EFL_INT_ENA_W1C bits are both set
  558. * @ilk: Set when any ILK_INT bit is set
  559. * @lbc: Set when any LBC_INT RO bit is set or when any non-RO LBC_INT
  560. * and corresponding LBC_INT_ENA_W1C bits are bot set
  561. * @pem: Set when any PEM(0)_INT RO bit is set or when any non-RO
  562. * PEM(0)_INT and corresponding PEM(0)_INT_ENA_W1C bit are both set
  563. * @ucd: Set when any UCD_INT bit is set
  564. * @zctl: Set when any ZIP_INT RO bit is set or when any non-RO ZIP_INT
  565. * and corresponding ZIP_INT_ENA_W1C bits are both set
  566. * @lbm: Set when any LBM_INT bit is set
  567. * @nps_pkt: Set when any NPS_PKT_INT bit is set
  568. * @nps_core: Set when any NPS_CORE_INT RO bit is set or when non-RO
  569. * NPS_CORE_INT and corresponding NSP_CORE_INT_ENA_W1C bits are both set
  570. */
  571. union nps_core_int_active {
  572. u64 value;
  573. struct {
  574. #if (defined(__BIG_ENDIAN_BITFIELD))
  575. u64 resend : 1;
  576. u64 raz : 43;
  577. u64 ocla : 1;
  578. u64 mbox : 1;
  579. u64 emu : 4;
  580. u64 bmo : 1;
  581. u64 bmi : 1;
  582. u64 aqm : 1;
  583. u64 zqm : 1;
  584. u64 efl : 1;
  585. u64 ilk : 1;
  586. u64 lbc : 1;
  587. u64 pem : 1;
  588. u64 pom : 1;
  589. u64 ucd : 1;
  590. u64 zctl : 1;
  591. u64 lbm : 1;
  592. u64 nps_pkt : 1;
  593. u64 nps_core : 1;
  594. #else
  595. u64 nps_core : 1;
  596. u64 nps_pkt : 1;
  597. u64 lbm : 1;
  598. u64 zctl: 1;
  599. u64 ucd : 1;
  600. u64 pom : 1;
  601. u64 pem : 1;
  602. u64 lbc : 1;
  603. u64 ilk : 1;
  604. u64 efl : 1;
  605. u64 zqm : 1;
  606. u64 aqm : 1;
  607. u64 bmi : 1;
  608. u64 bmo : 1;
  609. u64 emu : 4;
  610. u64 mbox : 1;
  611. u64 ocla : 1;
  612. u64 raz : 43;
  613. u64 resend : 1;
  614. #endif
  615. } s;
  616. };
  617. /**
  618. * struct efl_core_int - EFL Interrupt Registers
  619. * @epci_decode_err: EPCI decoded a transacation that was unknown
  620. * This error should only occurred when there is a micrcode/SE error
  621. * and should be considered fatal
  622. * @ae_err: An AE uncorrectable error occurred.
  623. * See EFL_CORE(0..3)_AE_ERR_INT
  624. * @se_err: An SE uncorrectable error occurred.
  625. * See EFL_CORE(0..3)_SE_ERR_INT
  626. * @dbe: Double-bit error occurred in EFL
  627. * @sbe: Single-bit error occurred in EFL
  628. * @d_left: Asserted when new POM-Header-BMI-data is
  629. * being sent to an Exec, and that Exec has Not read all BMI
  630. * data associated with the previous POM header
  631. * @len_ovr: Asserted when an Exec-Read is issued that is more than
  632. * 14 greater in length that the BMI data left to be read
  633. */
  634. union efl_core_int {
  635. u64 value;
  636. struct {
  637. #if (defined(__BIG_ENDIAN_BITFIELD))
  638. u64 raz : 57;
  639. u64 epci_decode_err : 1;
  640. u64 ae_err : 1;
  641. u64 se_err : 1;
  642. u64 dbe : 1;
  643. u64 sbe : 1;
  644. u64 d_left : 1;
  645. u64 len_ovr : 1;
  646. #else
  647. u64 len_ovr : 1;
  648. u64 d_left : 1;
  649. u64 sbe : 1;
  650. u64 dbe : 1;
  651. u64 se_err : 1;
  652. u64 ae_err : 1;
  653. u64 epci_decode_err : 1;
  654. u64 raz : 57;
  655. #endif
  656. } s;
  657. };
  658. /**
  659. * struct efl_core_int_ena_w1s - EFL core interrupt enable set register
  660. * @epci_decode_err: Reads or sets enable for
  661. * EFL_CORE(0..3)_INT[EPCI_DECODE_ERR].
  662. * @d_left: Reads or sets enable for
  663. * EFL_CORE(0..3)_INT[D_LEFT].
  664. * @len_ovr: Reads or sets enable for
  665. * EFL_CORE(0..3)_INT[LEN_OVR].
  666. */
  667. union efl_core_int_ena_w1s {
  668. u64 value;
  669. struct {
  670. #if (defined(__BIG_ENDIAN_BITFIELD))
  671. u64 raz_7_63 : 57;
  672. u64 epci_decode_err : 1;
  673. u64 raz_2_5 : 4;
  674. u64 d_left : 1;
  675. u64 len_ovr : 1;
  676. #else
  677. u64 len_ovr : 1;
  678. u64 d_left : 1;
  679. u64 raz_2_5 : 4;
  680. u64 epci_decode_err : 1;
  681. u64 raz_7_63 : 57;
  682. #endif
  683. } s;
  684. };
  685. /**
  686. * struct efl_rnm_ctl_status - RNM Control and Status Register
  687. * @ent_sel: Select input to RNM FIFO
  688. * @exp_ent: Exported entropy enable for random number generator
  689. * @rng_rst: Reset to RNG. Setting this bit to 1 cancels the generation
  690. * of the current random number.
  691. * @rnm_rst: Reset the RNM. Setting this bit to 1 clears all sorted numbers
  692. * in the random number memory.
  693. * @rng_en: Enabled the output of the RNG.
  694. * @ent_en: Entropy enable for random number generator.
  695. */
  696. union efl_rnm_ctl_status {
  697. u64 value;
  698. struct {
  699. #if (defined(__BIG_ENDIAN_BITFIELD))
  700. u64 raz_9_63 : 55;
  701. u64 ent_sel : 4;
  702. u64 exp_ent : 1;
  703. u64 rng_rst : 1;
  704. u64 rnm_rst : 1;
  705. u64 rng_en : 1;
  706. u64 ent_en : 1;
  707. #else
  708. u64 ent_en : 1;
  709. u64 rng_en : 1;
  710. u64 rnm_rst : 1;
  711. u64 rng_rst : 1;
  712. u64 exp_ent : 1;
  713. u64 ent_sel : 4;
  714. u64 raz_9_63 : 55;
  715. #endif
  716. } s;
  717. };
  718. /**
  719. * struct bmi_ctl - BMI control register
  720. * @ilk_hdrq_thrsh: Maximum number of header queue locations
  721. * that ILK packets may consume. When the threshold is
  722. * exceeded ILK_XOFF is sent to the BMI_X2P_ARB.
  723. * @nps_hdrq_thrsh: Maximum number of header queue locations
  724. * that NPS packets may consume. When the threshold is
  725. * exceeded NPS_XOFF is sent to the BMI_X2P_ARB.
  726. * @totl_hdrq_thrsh: Maximum number of header queue locations
  727. * that the sum of ILK and NPS packets may consume.
  728. * @ilk_free_thrsh: Maximum number of buffers that ILK packet
  729. * flows may consume before ILK_XOFF is sent to the BMI_X2P_ARB.
  730. * @nps_free_thrsh: Maximum number of buffers that NPS packet
  731. * flows may consume before NPS XOFF is sent to the BMI_X2p_ARB.
  732. * @totl_free_thrsh: Maximum number of buffers that bot ILK and NPS
  733. * packet flows may consume before both NPS_XOFF and ILK_XOFF
  734. * are asserted to the BMI_X2P_ARB.
  735. * @max_pkt_len: Maximum packet length, integral number of 256B
  736. * buffers.
  737. */
  738. union bmi_ctl {
  739. u64 value;
  740. struct {
  741. #if (defined(__BIG_ENDIAN_BITFIELD))
  742. u64 raz_56_63 : 8;
  743. u64 ilk_hdrq_thrsh : 8;
  744. u64 nps_hdrq_thrsh : 8;
  745. u64 totl_hdrq_thrsh : 8;
  746. u64 ilk_free_thrsh : 8;
  747. u64 nps_free_thrsh : 8;
  748. u64 totl_free_thrsh : 8;
  749. u64 max_pkt_len : 8;
  750. #else
  751. u64 max_pkt_len : 8;
  752. u64 totl_free_thrsh : 8;
  753. u64 nps_free_thrsh : 8;
  754. u64 ilk_free_thrsh : 8;
  755. u64 totl_hdrq_thrsh : 8;
  756. u64 nps_hdrq_thrsh : 8;
  757. u64 ilk_hdrq_thrsh : 8;
  758. u64 raz_56_63 : 8;
  759. #endif
  760. } s;
  761. };
  762. /**
  763. * struct bmi_int_ena_w1s - BMI interrupt enable set register
  764. * @ilk_req_oflw: Reads or sets enable for
  765. * BMI_INT[ILK_REQ_OFLW].
  766. * @nps_req_oflw: Reads or sets enable for
  767. * BMI_INT[NPS_REQ_OFLW].
  768. * @fpf_undrrn: Reads or sets enable for
  769. * BMI_INT[FPF_UNDRRN].
  770. * @eop_err_ilk: Reads or sets enable for
  771. * BMI_INT[EOP_ERR_ILK].
  772. * @eop_err_nps: Reads or sets enable for
  773. * BMI_INT[EOP_ERR_NPS].
  774. * @sop_err_ilk: Reads or sets enable for
  775. * BMI_INT[SOP_ERR_ILK].
  776. * @sop_err_nps: Reads or sets enable for
  777. * BMI_INT[SOP_ERR_NPS].
  778. * @pkt_rcv_err_ilk: Reads or sets enable for
  779. * BMI_INT[PKT_RCV_ERR_ILK].
  780. * @pkt_rcv_err_nps: Reads or sets enable for
  781. * BMI_INT[PKT_RCV_ERR_NPS].
  782. * @max_len_err_ilk: Reads or sets enable for
  783. * BMI_INT[MAX_LEN_ERR_ILK].
  784. * @max_len_err_nps: Reads or sets enable for
  785. * BMI_INT[MAX_LEN_ERR_NPS].
  786. */
  787. union bmi_int_ena_w1s {
  788. u64 value;
  789. struct {
  790. #if (defined(__BIG_ENDIAN_BITFIELD))
  791. u64 raz_13_63 : 51;
  792. u64 ilk_req_oflw : 1;
  793. u64 nps_req_oflw : 1;
  794. u64 raz_10 : 1;
  795. u64 raz_9 : 1;
  796. u64 fpf_undrrn : 1;
  797. u64 eop_err_ilk : 1;
  798. u64 eop_err_nps : 1;
  799. u64 sop_err_ilk : 1;
  800. u64 sop_err_nps : 1;
  801. u64 pkt_rcv_err_ilk : 1;
  802. u64 pkt_rcv_err_nps : 1;
  803. u64 max_len_err_ilk : 1;
  804. u64 max_len_err_nps : 1;
  805. #else
  806. u64 max_len_err_nps : 1;
  807. u64 max_len_err_ilk : 1;
  808. u64 pkt_rcv_err_nps : 1;
  809. u64 pkt_rcv_err_ilk : 1;
  810. u64 sop_err_nps : 1;
  811. u64 sop_err_ilk : 1;
  812. u64 eop_err_nps : 1;
  813. u64 eop_err_ilk : 1;
  814. u64 fpf_undrrn : 1;
  815. u64 raz_9 : 1;
  816. u64 raz_10 : 1;
  817. u64 nps_req_oflw : 1;
  818. u64 ilk_req_oflw : 1;
  819. u64 raz_13_63 : 51;
  820. #endif
  821. } s;
  822. };
  823. /**
  824. * struct bmo_ctl2 - BMO Control2 Register
  825. * @arb_sel: Determines P2X Arbitration
  826. * @ilk_buf_thrsh: Maximum number of buffers that the
  827. * ILK packet flows may consume before ILK XOFF is
  828. * asserted to the POM.
  829. * @nps_slc_buf_thrsh: Maximum number of buffers that the
  830. * NPS_SLC packet flow may consume before NPS_SLC XOFF is
  831. * asserted to the POM.
  832. * @nps_uns_buf_thrsh: Maximum number of buffers that the
  833. * NPS_UNS packet flow may consume before NPS_UNS XOFF is
  834. * asserted to the POM.
 * @totl_buf_thrsh: Maximum number of buffers that the ILK, NPS_UNS and
 * NPS_SLC packet flows may consume before NPS_UNS XOFF, NPS_SLC XOFF and
 * ILK XOFF are all asserted to the POM.
  838. */
union bmo_ctl2 {
	u64 value;	/* whole-register access */
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 arb_sel : 1;		/* bit 63 */
		u64 raz_32_62 : 31;		/* bits <62:32>, reserved */
		u64 ilk_buf_thrsh : 8;		/* bits <31:24> */
		u64 nps_slc_buf_thrsh : 8;	/* bits <23:16> */
		u64 nps_uns_buf_thrsh : 8;	/* bits <15:8> */
		u64 totl_buf_thrsh : 8;		/* bits <7:0> */
#else
		/* little-endian: same fields, lowest bit declared first */
		u64 totl_buf_thrsh : 8;
		u64 nps_uns_buf_thrsh : 8;
		u64 nps_slc_buf_thrsh : 8;
		u64 ilk_buf_thrsh : 8;
		u64 raz_32_62 : 31;
		u64 arb_sel : 1;
#endif
	} s;
};
  859. /**
  860. * struct pom_int_ena_w1s - POM interrupt enable set register
  861. * @illegal_intf: Reads or sets enable for POM_INT[ILLEGAL_INTF].
  862. * @illegal_dport: Reads or sets enable for POM_INT[ILLEGAL_DPORT].
  863. */
union pom_int_ena_w1s {
	u64 value;	/* whole-register access */
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz2 : 60;		/* bits <63:4>, reserved */
		u64 illegal_intf : 1;	/* bit 3 */
		u64 illegal_dport : 1;	/* bit 2 */
		u64 raz1 : 1;		/* bit 1, reserved */
		u64 raz0 : 1;		/* bit 0, reserved */
#else
		/* little-endian: same fields, lowest bit declared first */
		u64 raz0 : 1;
		u64 raz1 : 1;
		u64 illegal_dport : 1;
		u64 illegal_intf : 1;
		u64 raz2 : 60;
#endif
	} s;
};
  882. /**
  883. * struct lbc_inval_ctl - LBC invalidation control register
  884. * @wait_timer: Wait timer for wait state. [WAIT_TIMER] must
  885. * always be written with its reset value.
  886. * @cam_inval_start: Software should write [CAM_INVAL_START]=1
  887. * to initiate an LBC cache invalidation. After this, software
  888. * should read LBC_INVAL_STATUS until LBC_INVAL_STATUS[DONE] is set.
 * LBC hardware clears [CAM_INVAL_START] before software can
 * observe LBC_INVAL_STATUS[DONE] being set.
  891. */
union lbc_inval_ctl {
	u64 value;	/* whole-register access */
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz2 : 48;			/* bits <63:16>, reserved */
		u64 wait_timer : 8;		/* bits <15:8> */
		u64 raz1 : 6;			/* bits <7:2>, reserved */
		u64 cam_inval_start : 1;	/* bit 1 */
		u64 raz0 : 1;			/* bit 0, reserved */
#else
		/* little-endian: same fields, lowest bit declared first */
		u64 raz0 : 1;
		u64 cam_inval_start : 1;
		u64 raz1 : 6;
		u64 wait_timer : 8;
		u64 raz2 : 48;
#endif
	} s;
};
  910. /**
  911. * struct lbc_int_ena_w1s - LBC interrupt enable set register
  912. * @cam_hard_err: Reads or sets enable for LBC_INT[CAM_HARD_ERR].
  913. * @cam_inval_abort: Reads or sets enable for LBC_INT[CAM_INVAL_ABORT].
  914. * @over_fetch_err: Reads or sets enable for LBC_INT[OVER_FETCH_ERR].
  915. * @cache_line_to_err: Reads or sets enable for
  916. * LBC_INT[CACHE_LINE_TO_ERR].
  917. * @cam_soft_err: Reads or sets enable for
  918. * LBC_INT[CAM_SOFT_ERR].
  919. * @dma_rd_err: Reads or sets enable for
  920. * LBC_INT[DMA_RD_ERR].
  921. */
union lbc_int_ena_w1s {
	u64 value;	/* whole-register access */
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_10_63 : 54;		/* bits <63:10>, reserved */
		u64 cam_hard_err : 1;		/* bit 9 */
		u64 cam_inval_abort : 1;	/* bit 8 */
		u64 over_fetch_err : 1;		/* bit 7 */
		u64 cache_line_to_err : 1;	/* bit 6 */
		u64 raz_2_5 : 4;		/* bits <5:2>, reserved */
		u64 cam_soft_err : 1;		/* bit 1 */
		u64 dma_rd_err : 1;		/* bit 0 */
#else
		/* little-endian: same fields, lowest bit declared first */
		u64 dma_rd_err : 1;
		u64 cam_soft_err : 1;
		u64 raz_2_5 : 4;
		u64 cache_line_to_err : 1;
		u64 over_fetch_err : 1;
		u64 cam_inval_abort : 1;
		u64 cam_hard_err : 1;
		u64 raz_10_63 : 54;
#endif
	} s;
};
  946. /**
  947. * struct lbc_int - LBC interrupt summary register
  948. * @cam_hard_err: indicates a fatal hardware error.
  949. * It requires system reset.
  950. * When [CAM_HARD_ERR] is set, LBC stops logging any new information in
  951. * LBC_POM_MISS_INFO_LOG,
  952. * LBC_POM_MISS_ADDR_LOG,
  953. * LBC_EFL_MISS_INFO_LOG, and
  954. * LBC_EFL_MISS_ADDR_LOG.
  955. * Software should sample them.
  956. * @cam_inval_abort: indicates a fatal hardware error.
  957. * System reset is required.
 * @over_fetch_err: indicates a fatal hardware error.
 * System reset is required.
  960. * @cache_line_to_err: is a debug feature.
  961. * This timeout interrupt bit tells the software that
  962. * a cacheline in LBC has non-zero usage and the context
  963. * has not been used for greater than the
  964. * LBC_TO_CNT[TO_CNT] time interval.
  965. * @sbe: Memory SBE error. This is recoverable via ECC.
  966. * See LBC_ECC_INT for more details.
  967. * @dbe: Memory DBE error. This is a fatal and requires a
  968. * system reset.
  969. * @pref_dat_len_mismatch_err: Summary bit for context length
  970. * mismatch errors.
 * @rd_dat_len_mismatch_err: Summary bit for SE read data length
 * greater than data prefetch length errors.
  973. * @cam_soft_err: is recoverable. Software must complete a
  974. * LBC_INVAL_CTL[CAM_INVAL_START] invalidation sequence and
  975. * then clear [CAM_SOFT_ERR].
 * @dma_rd_err: A context prefetch read of host memory returned with
 * a read error.
  978. */
union lbc_int {
	u64 value;	/* whole-register access */
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_10_63 : 54;			/* bits <63:10>, reserved */
		u64 cam_hard_err : 1;			/* bit 9 */
		u64 cam_inval_abort : 1;		/* bit 8 */
		u64 over_fetch_err : 1;			/* bit 7 */
		u64 cache_line_to_err : 1;		/* bit 6 */
		u64 sbe : 1;				/* bit 5 */
		u64 dbe : 1;				/* bit 4 */
		u64 pref_dat_len_mismatch_err : 1;	/* bit 3 */
		u64 rd_dat_len_mismatch_err : 1;	/* bit 2 */
		u64 cam_soft_err : 1;			/* bit 1 */
		u64 dma_rd_err : 1;			/* bit 0 */
#else
		/* little-endian: same fields, lowest bit declared first */
		u64 dma_rd_err : 1;
		u64 cam_soft_err : 1;
		u64 rd_dat_len_mismatch_err : 1;
		u64 pref_dat_len_mismatch_err : 1;
		u64 dbe : 1;
		u64 sbe : 1;
		u64 cache_line_to_err : 1;
		u64 over_fetch_err : 1;
		u64 cam_inval_abort : 1;
		u64 cam_hard_err : 1;
		u64 raz_10_63 : 54;
#endif
	} s;
};
  1009. /**
  1010. * struct lbc_inval_status: LBC Invalidation status register
  1011. * @cam_clean_entry_complete_cnt: The number of entries that are
  1012. * cleaned up successfully.
  1013. * @cam_clean_entry_cnt: The number of entries that have the CAM
  1014. * inval command issued.
  1015. * @cam_inval_state: cam invalidation FSM state
  1016. * @cam_inval_abort: cam invalidation abort
  1017. * @cam_rst_rdy: lbc_cam reset ready
  1018. * @done: LBC clears [DONE] when
  1019. * LBC_INVAL_CTL[CAM_INVAL_START] is written with a one,
  1020. * and sets [DONE] when it completes the invalidation
  1021. * sequence.
  1022. */
union lbc_inval_status {
	u64 value;	/* whole-register access */
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz3 : 23;				/* bits <63:41>, reserved */
		u64 cam_clean_entry_complete_cnt : 9;	/* bits <40:32> */
		u64 raz2 : 7;				/* bits <31:25>, reserved */
		u64 cam_clean_entry_cnt : 9;		/* bits <24:16> */
		u64 raz1 : 5;				/* bits <15:11>, reserved */
		u64 cam_inval_state : 3;		/* bits <10:8> */
		u64 raz0 : 5;				/* bits <7:3>, reserved */
		u64 cam_inval_abort : 1;		/* bit 2 */
		u64 cam_rst_rdy : 1;			/* bit 1 */
		u64 done : 1;				/* bit 0 */
#else
		/* little-endian: same fields, lowest bit declared first */
		u64 done : 1;
		u64 cam_rst_rdy : 1;
		u64 cam_inval_abort : 1;
		u64 raz0 : 5;
		u64 cam_inval_state : 3;
		u64 raz1 : 5;
		u64 cam_clean_entry_cnt : 9;
		u64 raz2 : 7;
		u64 cam_clean_entry_complete_cnt : 9;
		u64 raz3 : 23;
#endif
	} s;
};
  1051. /**
  1052. * struct rst_boot: RST Boot Register
  1053. * @jtcsrdis: when set, internal CSR access via JTAG TAP controller
  1054. * is disabled
  1055. * @jt_tst_mode: JTAG test mode
  1056. * @io_supply: I/O power supply setting based on IO_VDD_SELECT pin:
  1057. * 0x1 = 1.8V
  1058. * 0x2 = 2.5V
  1059. * 0x4 = 3.3V
  1060. * All other values are reserved
  1061. * @pnr_mul: clock multiplier
  1062. * @lboot: last boot cause mask, resets only with PLL_DC_OK
  1063. * @rboot: determines whether core 0 remains in reset after
  1064. * chip cold or warm or soft reset
  1065. * @rboot_pin: read only access to REMOTE_BOOT pin
  1066. */
union rst_boot {
	u64 value;	/* whole-register access */
	/* anonymous bitfield struct; fields accessed directly on the union */
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_63 : 1;		/* bit 63, reserved */
		u64 jtcsrdis : 1;	/* bit 62 */
		u64 raz_59_61 : 3;	/* bits <61:59>, reserved */
		u64 jt_tst_mode : 1;	/* bit 58 */
		u64 raz_40_57 : 18;	/* bits <57:40>, reserved */
		u64 io_supply : 3;	/* bits <39:37> */
		u64 raz_30_36 : 7;	/* bits <36:30>, reserved */
		u64 pnr_mul : 6;	/* bits <29:24> */
		u64 raz_12_23 : 12;	/* bits <23:12>, reserved */
		u64 lboot : 10;		/* bits <11:2> */
		u64 rboot : 1;		/* bit 1 */
		u64 rboot_pin : 1;	/* bit 0 */
#else
		/* little-endian: same fields, lowest bit declared first */
		u64 rboot_pin : 1;
		u64 rboot : 1;
		u64 lboot : 10;
		u64 raz_12_23 : 12;
		u64 pnr_mul : 6;
		u64 raz_30_36 : 7;
		u64 io_supply : 3;
		u64 raz_40_57 : 18;
		u64 jt_tst_mode : 1;
		u64 raz_59_61 : 3;
		u64 jtcsrdis : 1;
		u64 raz_63 : 1;
#endif
	};
};
  1099. /**
  1100. * struct fus_dat1: Fuse Data 1 Register
  1101. * @pll_mul: main clock PLL multiplier hardware limit
  1102. * @pll_half_dis: main clock PLL control
  1103. * @efus_lck: efuse lockdown
  1104. * @zip_info: ZIP information
  1105. * @bar2_sz_conf: when zero, BAR2 size conforms to
  1106. * PCIe specification
  1107. * @efus_ign: efuse ignore
  1108. * @nozip: ZIP disable
  1109. * @pll_alt_matrix: select alternate PLL matrix
  1110. * @pll_bwadj_denom: select CLKF denominator for
  1111. * BWADJ value
  1112. * @chip_id: chip ID
  1113. */
union fus_dat1 {
	u64 value;	/* whole-register access */
	/* anonymous bitfield struct; fields accessed directly on the union */
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_57_63 : 7;	/* bits <63:57>, reserved */
		u64 pll_mul : 3;	/* bits <56:54> */
		u64 pll_half_dis : 1;	/* bit 53 */
		u64 raz_43_52 : 10;	/* bits <52:43>, reserved */
		u64 efus_lck : 3;	/* bits <42:40> */
		u64 raz_26_39 : 14;	/* bits <39:26>, reserved */
		u64 zip_info : 5;	/* bits <25:21> */
		u64 bar2_sz_conf : 1;	/* bit 20 */
		u64 efus_ign : 1;	/* bit 19 */
		u64 nozip : 1;		/* bit 18 */
		u64 raz_11_17 : 7;	/* bits <17:11>, reserved */
		u64 pll_alt_matrix : 1;	/* bit 10 */
		u64 pll_bwadj_denom : 2;	/* bits <9:8> */
		u64 chip_id : 8;	/* bits <7:0> */
#else
		/* little-endian: same fields, lowest bit declared first */
		u64 chip_id : 8;
		u64 pll_bwadj_denom : 2;
		u64 pll_alt_matrix : 1;
		u64 raz_11_17 : 7;
		u64 nozip : 1;
		u64 efus_ign : 1;
		u64 bar2_sz_conf : 1;
		u64 zip_info : 5;
		u64 raz_26_39 : 14;
		u64 efus_lck : 3;
		u64 raz_43_52 : 10;
		u64 pll_half_dis : 1;
		u64 pll_mul : 3;
		u64 raz_57_63 : 7;
#endif
	};
};
  1150. #endif /* __NITROX_CSR_H */