/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

struct qed_pi_info {
	qed_int_comp_cb_t comp_cb;
	void *cookie;
};

struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};

enum qed_attention_type {
	QED_ATTN_TYPE_ATTN,
	QED_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY	(1 << 0)

#define ATTENTION_LENGTH_MASK	(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT	(4)
#define ATTENTION_LENGTH(flags)	(((flags) & ATTENTION_LENGTH_MASK) >> \
				 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE	BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR		(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT	((2 << ATTENTION_LENGTH_SHIFT) | \
				 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK	(0x000ff000)
#define ATTENTION_OFFSET_SHIFT	(12)

#define ATTENTION_BB_MASK	(0x00700000)
#define ATTENTION_BB_SHIFT	(20)
#define ATTENTION_BB(value)	((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT	BIT(23)

	unsigned int flags;

	/* Callback to call if attention will be triggered */
	int (*cb)(struct qed_hwfn *p_hwfn);

	enum block_id block_index;
};
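
/* The 'flags' word above packs several attributes of an AEU source. As an
 * illustrative (non-normative) reading of the encodings defined above:
 * ATTENTION_SINGLE is a single non-parity bit (length 1), ATTENTION_PAR is
 * a lone parity bit, and ATTENTION_PAR_INT describes a parity bit followed
 * by an interrupt bit (length 2). A descriptor such as
 * (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT) - used by
 * the "SW timers #%d" source in aeu_descs below - spans 8 consecutive bits
 * whose printed numbering starts at offset 1.
 */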

struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS	(8)
#define NUM_ATTN_REGS	(9)

/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	/* This might occur on certain instances; Log it once then mask it */
	DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
		tmp);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
	       0xffffffff);

	return 0;
}

#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
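
/* These _MASK/_SHIFT pairs are consumed via GET_FIELD() from qed.h, which
 * (assuming its usual definition) extracts a field as
 * ((value) >> NAME_SHIFT) & NAME_MASK. For example, decoding the PF id
 * from the details word below amounts to (data >> 14) & 0xf.
 */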

static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			 PSWHST_REG_INCORRECT_ACCESS_VALID);

	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->cdev,
			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_CLIENT),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
			data);
	}

	return 0;
}

#define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
#define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
#define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
#define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)
#define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
#define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
#define QED_GRC_ATTENTION_PF_MASK	(0xf)
#define QED_GRC_ATTENTION_PF_SHIFT	(0)
#define QED_GRC_ATTENTION_VF_MASK	(0xff)
#define QED_GRC_ATTENTION_VF_SHIFT	(4)
#define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
#define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
#define QED_GRC_ATTENTION_PRIV_VF	(0)

static const char *attn_master_to_str(u8 master)
{
	switch (master) {
	case 1: return "PXP";
	case 2: return "MCP";
	case 3: return "MSDM";
	case 4: return "PSDM";
	case 5: return "YSDM";
	case 6: return "USDM";
	case 7: return "TSDM";
	case 8: return "XSDM";
	case 9: return "DBU";
	case 10: return "DMAE";
	default:
		return "Unknown";
	}
}

static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register
	 */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_INFO(p_hwfn->cdev,
		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		tmp2, tmp,
		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));

out:
	/* Regardless of anything else, clean the validity bit */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);

	return 0;
}

#define PGLUE_ATTENTION_VALID			(1 << 29)
#define PGLUE_ATTENTION_RD_VALID		(1 << 26)
#define PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf)
#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT	(20)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT	(19)
#define PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff)
#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT	(24)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT	(21)
#define PGLUE_ATTENTION_DETAILS2_BME_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT	(22)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT	(23)
#define PGLUE_ATTENTION_ICPL_VALID		(1 << 23)
#define PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
#define PGLUE_ATTENTION_ILT_VALID		(1 << 23)

static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp;

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_WR_DETAILS);

		DP_INFO(p_hwfn,
			"Illegal write by chip to [%08x:%08x] blocked.\n"
			"Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			"Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			addr_hi, addr_lo, details,
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			GET_FIELD(details,
				  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			tmp,
			GET_FIELD(tmp,
				  PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
			GET_FIELD(tmp,
				  PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			GET_FIELD(tmp,
				  PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
	}

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_INFO(p_hwfn,
			"Illegal read by chip from [%08x:%08x] blocked.\n"
			" Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			" Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			addr_hi, addr_lo, details,
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			GET_FIELD(details,
				  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			tmp,
			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
									 : 0,
			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
									: 0);
	}

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & PGLUE_ATTENTION_ICPL_VALID)
		DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
			tmp, addr_hi, addr_lo);
	}

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_INFO(p_hwfn,
			"ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));

	return 0;
}

#define QED_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define QED_DORQ_ATTENTION_SIZE_MASK	(0x7f)
#define QED_DORQ_ATTENTION_SIZE_SHIFT	(16)

static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 reason;

	reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
		 QED_DORQ_ATTENTION_REASON_MASK;
	if (reason) {
		u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				     DORQ_REG_DB_DROP_DETAILS);

		DP_INFO(p_hwfn->cdev,
			"DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
			qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			       DORQ_REG_DB_DROP_DETAILS_ADDRESS),
			(u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
			GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
			reason);
	}

	return -EINVAL;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};

/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{	/* After Invert 1 */
			{"GPIO0 function%d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
			 qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
					  (1 << ATTENTION_OFFSET_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d",
			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
		}
	},

	{
		{	/* After Invert 3 */
			{"General Attention %d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) |
			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"NWS Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 NULL, BLOCK_NWS},
			{"NWS Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 NULL, BLOCK_NWS},
			{"NWM Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 NULL, BLOCK_NWM},
			{"NWM Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE,
			 qed_mcp_attn_cb, MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
		}
	},

	{
		{	/* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
			{"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
		}
	},

	{
		{	/* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
			{"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
			{"DORQ", ATTENTION_PAR_INT,
			 qed_dorq_attn_cb, BLOCK_DORQ},
			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
			{"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
		}
	},

	{
		{	/* After Invert 7 */
			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
		}
	},

	{
		{	/* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT,
			 qed_pswhst_attn_cb, BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT,
			 qed_grc_attn_cb, BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
			 NULL, BLOCK_PGLCS},
			{"PERST_B assertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PERST_B deassertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 9 */
			{"MCP Latched memory", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched ump_tx", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},
};

static struct aeu_invert_reg_bit *
qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
		      struct aeu_invert_reg_bit *p_bit)
{
	if (!QED_IS_BB(p_hwfn->cdev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}

static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
				   struct aeu_invert_reg_bit *p_bit)
{
	return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}

#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)

struct qed_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* A mask of the AEU bits resulting in a parity error */
	u32 parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg *p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};
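
/* qed_attn_update_idx() below samples the attention SB running index and
 * reports QED_SB_ATT_IDX when it has advanced since the last sample; the
 * slowpath DPC uses that return value to decide whether to scan for new
 * attentions.
 */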
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
				      struct qed_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	/* Make certain HW write took effect */
	mmiowb();

	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = QED_SB_ATT_IDX;
	}

	/* Make certain we got a consistent view with HW */
	mmiowb();

	return rc;
}

/**
 * @brief qed_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return int
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		       sb_attn_sw->mfw_attn_addr, 0);
	}

	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}

static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
			       enum block_id id,
			       enum dbg_attn_type type, bool b_clear)
{
	struct dbg_attn_block_result attn_results;
	enum dbg_status status;

	memset(&attn_results, 0, sizeof(attn_results));

	status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
				   b_clear, &attn_results);
	if (status != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn,
			  "Failed to parse attention information [status: %s]\n",
			  qed_dbg_get_status_str(status));
	else
		qed_dbg_parse_attn(p_hwfn, &attn_results);
}

/**
 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 * this bit to this group.
 * @param p_bit_name - printable name of the attention bit
 * @param bitmask - mask of this bit within the aeu_en_reg
 *
 * @return int
 */
static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
			    struct aeu_invert_reg_bit *p_aeu,
			    u32 aeu_en_reg,
			    const char *p_bit_name, u32 bitmask)
{
	bool b_fatal = false;
	int rc = -EINVAL;
	u32 val;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID)
		qed_int_attn_print(p_hwfn, p_aeu->block_index,
				   ATTN_TYPE_INTERRUPT, !b_fatal);

	/* If the attention is benign, no need to prevent it */
	if (!rc)
		goto out;

	/* Prevent this Attention from being asserted in the future */
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
	DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
		p_bit_name);

out:
	return rc;
}

/**
 * @brief qed_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
				       struct aeu_invert_reg_bit *p_aeu,
				       u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->cdev,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In BB, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			qed_int_attn_print(p_hwfn, BLOCK_OPTE,
					   ATTN_TYPE_PARITY, false);
			qed_int_attn_print(p_hwfn, BLOCK_MCP,
					   ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~BIT(bit_index);
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}

/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return int
 *
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
			       u16 deasserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	int rc = 0;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					MISC_REG_AEU_AFTER_INVERT_1_IGU +
					i * 0x4);
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n",
			   i, aeu_inv_arr[i]);
	}

	/* Find parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);

		/* Skip register in which no parity bit is currently set */
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & BIT(bit_idx)))
				qed_int_deassertion_parity(p_hwfn, p_bit,
							   aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;

			en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				unsigned long bitmask;
				u8 bit, bit_len;

				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
				p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)find_first_bit(&bitmask,
								 bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						snprintf(bit_name, 30,
							 p_aeu->bit_name, num);
					else
						strncpy(bit_name,
							p_aeu->bit_name, 30);

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					qed_int_deassertion_aeu_bit(p_hwfn,
								    p_aeu,
								    aeu_en,
								    bit_name,
								    bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Clear IGU indication for the deasserted bits */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}

static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u32 attn_bits = 0, attn_acks = 0;
	u16 asserted_bits, deasserted_bits;
	__le16 index;
	int rc = 0;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = p_sb_attn->sb_index;
		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
	} while (index != p_sb_attn->sb_index);
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and consistent with known state - deassertion
	 * when previous attention & current ack, and assertion when current
	 * attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
			~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
			  p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	} else if (asserted_bits == 0x100) {
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication [deassertion]\n");
	}

	if (asserted_bits) {
		rc = qed_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = qed_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}

static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
			    void __iomem *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_ATTN <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee all commands will be received (in-order) by HW.
	 */
	mmiowb();
	barrier();
}

void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
	struct qed_pi_info *pi_info = NULL;
	struct qed_sb_attn_info *sb_attn;
	struct qed_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->cdev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix and
	 * inta in non-mask mode; in inta it does no harm.
	 */
	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->cdev,
		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = qed_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->cdev,
		       "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. if not just ack them */
	if (!(rc & QED_SB_EVENT_MASK)) {
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & QED_SB_ATT_IDX)
		qed_int_attentions(p_hwfn);

	if (rc & QED_SB_IDX) {
		int pi;

		/* Invoke every registered protocol-index callback */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & QED_SB_ATT_IDX))
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_attn, p_sb->sb_phys);
	kfree(p_sb);
	p_hwfn->p_sb_attn = NULL;
}

static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}

static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (qed_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	qed_int_sb_attn_setup(p_hwfn, p_ptt);
}

static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return 0;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48
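
/* Worked example (editor's illustration of the timer_res selection logic
 * used below): with rx_coalesce_usecs = 24, 24 <= 0x7F so timer_res = 0 and
 * the 7-bit timeset is 24 >> 0 = 24; with 200 usecs, 200 <= 0xFF gives
 * timer_res = 1 and timeset = 200 >> 1 = 100, still within 7 bits.
 */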

void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			   struct cau_sb_entry *p_sb_entry,
			   u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cau_state;
	u8 timer_res;

	memset(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!cdev->rx_coalesce_usecs)
			cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
		if (!cdev->tx_coalesce_usecs)
			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (cdev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (cdev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}

static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u16 igu_sb_id,
				u32 pi_index,
				enum qed_coalescing_fsm coalescing_fsm,
				u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->cdev))
		return;

	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
		       *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}

void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 dma_addr_t sb_phys,
			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
			      vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
				  CAU_REG_SB_ADDR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, 0);
		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, 0);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE, timeset);

		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt,
					    igu_sb_id, TX_PI(i),
					    QED_COAL_TX_STATE_MACHINE,
					    timeset);
		}
	}
}

void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->cdev))
		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				    sb_info->igu_sb_id, 0, 0);
}

struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !(p_block->status & QED_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return NULL;
}

static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return QED_SB_INVALID_IDX;
}

u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == QED_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->cdev))
		igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id == QED_SP_SB_ID)
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}

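/* Mapping note (illustrative): client SB index 0 resolves to PF
 * vector 1, since vector 0 is reserved for the default (slowpath) SB;
 * hence the sb_id + 1 passed to qed_get_pf_igu_sb_id() above.
 */
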
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_sb_info *sb_info,
		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != QED_SP_SB_ID) {
		if (IS_PF(p_hwfn->cdev)) {
			struct qed_igu_info *p_info;
			struct qed_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~QED_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}

	sb_info->cdev = p_hwfn->cdev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->cdev)) {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);
	} else {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_IGU +
				    ((IGU_CMD_INT_ACK_BASE +
				      sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= QED_SB_INFO_INIT;

	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return 0;
}

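/* Usage sketch (hedged; the variables here are illustrative, not taken
 * from this file): a client allocates coherent DMA memory for the SB
 * and hands it to qed_int_sb_init():
 *
 *	sb_virt = dma_alloc_coherent(&cdev->pdev->dev, sizeof(*sb_virt),
 *				     &sb_phys, GFP_KERNEL);
 *	if (sb_virt)
 *		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt,
 *				     sb_phys, sb_id);
 */
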
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
		       struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_igu_block *p_block;
	struct qed_igu_info *p_info;

	if (!sb_info)
		return 0;

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_VF(p_hwfn->cdev)) {
		qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
		return 0;
	}

	p_info = p_hwfn->hw_info.p_igu_info;
	p_block = &p_info->entry[sb_info->igu_sb_id];

	/* Vector 0 is reserved to Default SB */
	if (!p_block->vector_number) {
		DP_ERR(p_hwfn, "Do not free sp sb using this function\n");
		return -EINVAL;
	}

	/* Lose reference to client's SB info, and fix counters */
	p_block->sb_info = NULL;
	p_block->status |= QED_IGU_STATUS_FREE;
	p_info->usage.free_cnt++;

	return 0;
}

static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_info.sb_virt,
				  p_sb->sb_info.sb_phys);

	kfree(p_sb);
	p_hwfn->p_sp_sb = NULL;
}

static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    SB_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
			p_phys, QED_SP_SB_ID);

	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return 0;
}

int qed_int_register_cb(struct qed_hwfn *p_hwfn,
			qed_int_comp_cb_t comp_cb,
			void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	int rc = -ENOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = 0;
		break;
	}

	return rc;
}

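/* Usage sketch (hedged; my_comp_cb and my_cookie are hypothetical): a
 * slowpath client reserves a PI, polls *p_fw_cons for firmware consumer
 * updates, and releases the PI when done:
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *
 *	rc = qed_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				 &sb_idx, &p_fw_cons);
 *	...
 *	qed_int_unregister_cb(p_hwfn, sb_idx);
 */
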
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (!p_sp_sb->pi_info_arr[pi].comp_cb)
		return -ENOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
	p_sp_sb->pi_info_arr[pi].cookie = NULL;

	return 0;
}

u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}

void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->cdev->int_mode = int_mode;
	switch (p_hwfn->cdev->int_mode) {
	case QED_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;
	case QED_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;
	case QED_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case QED_INT_MODE_POLL:
		break;
	}

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}

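/* Note: INTa and MSI are single-ISR modes, so IGU_PF_CONF_SINGLE_ISR_EN
 * is set and one handler services the entire function; MSI-X leaves it
 * clear so each status block can raise its own vector. POLL adds no
 * interrupt-enable bits beyond the function-enable/attention defaults.
 */
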
static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	/* Configure AEU signal change to produce attentions */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Flush the writes to IGU */
	mmiowb();

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}

int
qed_int_igu_enable(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	int rc = 0;

	qed_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}

	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}

void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}

#define IGU_CLEANUP_SLEEP_LENGTH		(1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u16 igu_sb_id,
				   bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	barrier();

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Flush the write to IGU */
	mmiowb();

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;

		usleep_range(5000, 10000);
	} while (--sleep_cnt);

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}

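/* Polling math (illustrative): for igu_sb_id = 70, sb_bit = BIT(70 % 32)
 * = BIT(6), and sb_bit_addr points (70 / 32) * sizeof(u32) = 8 bytes
 * past IGU_REG_CLEANUP_STATUS_0, i.e. at the third 32-bit status word.
 */
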
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct qed_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id,
		   p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	/* Set */
	if (b_set)
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = qed_rd(p_hwfn, p_ptt,
			     IGU_REG_WRITE_DONE_PENDING +
			     ((igu_sb_id / 32) * 4));
		if (val & BIT((igu_sb_id % 32)))
			usleep_range(10, 20);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}

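/* The final loop clears one 32-bit CAU PI entry per protocol index;
 * the layout here is 12 PI entries per SB, hence the igu_sb_id * 12
 * element stride (times 4 bytes) into CAU_REG_PI_MEMORY.
 */
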
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      bool b_set, bool b_slowpath)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & QED_IGU_STATUS_DSB))
			continue;

		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
	}

	if (b_slowpath)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						p_info->igu_dsb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
}

int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, QED_SB)) {
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW has provided -
		 * don't forget the MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
				RESC_NUM(p_hwfn, QED_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
		}

		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn,
					   NETIF_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have totally
			 * in IGU + number of PF SBs. So we can validate that
			 * we'd have sufficient for VF.
			 */
			if (vfs > p_info->usage.free_cnt +
			    p_info->usage.free_cnt_iov - p_info->usage.cnt) {
				DP_NOTICE(p_hwfn,
					  "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return -EINVAL;
			}

			/* Currently cap the number of VFs SBs by the
			 * number of VFs.
			 */
			p_info->usage.iov_cnt = vfs;
		}
	}

	/* Mark all SBs as free, now in the right PF/VFs division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU cam to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & QED_IGU_STATUS_VALID))
			continue;

		if (p_block->status & QED_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
			    p_hwfn->cdev->p_iov_info->first_vf_in_pf +
			    p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = qed_rd(p_hwfn, p_ptt,
			      IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);

		if (rval != val) {
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY +
			       sizeof(u32) * igu_sb_id, val);

			DP_VERBOSE(p_hwfn,
				   NETIF_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id,
				   p_block->function_id,
				   p_block->is_pf,
				   p_block->vector_number, rval, val);
		}
	}

	return 0;
}

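/* CAM line sketch (values illustrative): a PF entry for rel_pf_id = 2,
 * vector 5 packs function-number = 2, PF_VALID = 1, vector-number = 5
 * and VALID = 1 into a single 32-bit IGU_REG_MAPPING_MEMORY word via
 * the SET_FIELD() calls above; the word is written back only when it
 * differs from what the CAM already holds, sparing redundant writes.
 */
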
static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u16 igu_sb_id)
{
	u32 val = qed_rd(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct qed_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}

int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_igu_info;
	struct qed_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
	if (!p_hwfn->hw_info.p_igu_info)
		return -ENOMEM;

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;

	/* Find the range of VF ids whose SBs belong to this PF */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		/* Read current entry; Notice it might not belong to this PF */
		qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= QED_IGU_STATUS_DSB;
		}

		/* Limit the number of prints by having each PF print only its
		 * own entries, with the exception of PF0 which prints
		 * everything.
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0)) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
		}
	}

	if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return -EINVAL;
	}

	/* All non-default SBs are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id,
		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

	return 0;
}

/**
 * @brief Initialize igu runtime registers
 *
 * @param p_hwfn
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}

u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				lsb_igu_cmd_addr * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				msb_igu_cmd_addr * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}

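/* Note: the two REG_RD() calls fetch the low and high halves of the
 * masked interrupt status; multiplying a command index by 8 converts
 * it into a BAR0 byte offset, matching the << 3 scaling applied to
 * igu_addr earlier in this file (8 bytes per IGU command cell).
 */
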
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
	tasklet_init(p_hwfn->sp_dpc,
		     qed_int_sp_dpc, (unsigned long)p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
	if (!p_hwfn->sp_dpc)
		return -ENOMEM;

	return 0;
}

static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->sp_dpc);
	p_hwfn->sp_dpc = NULL;
}

int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	rc = qed_int_sp_dpc_alloc(p_hwfn);
	if (rc)
		return rc;

	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc)
		return rc;

	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);

	return rc;
}

void qed_int_free(struct qed_hwfn *p_hwfn)
{
	qed_int_sp_sb_free(p_hwfn);
	qed_int_sb_attn_free(p_hwfn);
	qed_int_sp_dpc_free(p_hwfn);
}

void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	qed_int_sb_attn_setup(p_hwfn, p_ptt);
	qed_int_sp_dpc_setup(p_hwfn);
}

void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			 struct qed_sb_cnt_info *p_sb_cnt_info)
{
	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info || !p_sb_cnt_info)
		return;

	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}

void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}

int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	int rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return -EINVAL;
	}

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	if (tx)
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)&sb_entry,
			       CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64), 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}
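
/* Usage sketch (hedged; the resolution and SB id are illustrative): to
 * program Rx timer resolution 1 on SB 3 after hw-init has completed:
 *
 *	rc = qed_int_set_timer_res(p_hwfn, p_ptt, 1, 3, false);
 *	if (rc)
 *		DP_NOTICE(p_hwfn, "Failed to set timer resolution\n");
 */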