fm10k_pf.c
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Intel(R) Ethernet Switch Host Interface Driver
  3. * Copyright(c) 2013 - 2018 Intel Corporation.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * The full GNU General Public License is included in this distribution in
  15. * the file called "COPYING".
  16. *
  17. * Contact Information:
  18. * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  19. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  20. */
  21. #include "fm10k_pf.h"
  22. #include "fm10k_vf.h"
  23. /**
  24. * fm10k_reset_hw_pf - PF hardware reset
  25. * @hw: pointer to hardware structure
  26. *
  27. * This function should return the hardware to a state similar to the
  28. * one it is in after being powered on.
  29. **/
  30. static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
  31. {
  32. s32 err;
  33. u32 reg;
  34. u16 i;
  35. /* Disable interrupts */
  36. fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));
  37. /* Lock ITR2 reg 0 into itself and disable interrupt moderation */
  38. fm10k_write_reg(hw, FM10K_ITR2(0), 0);
  39. fm10k_write_reg(hw, FM10K_INT_CTRL, 0);
  40. /* We assume here Tx and Rx queue 0 are owned by the PF */
  41. /* Shut off VF access to their queues forcing them to queue 0 */
  42. for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
  43. fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
  44. fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
  45. }
  46. /* shut down all rings */
  47. err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
  48. if (err == FM10K_ERR_REQUESTS_PENDING) {
  49. hw->mac.reset_while_pending++;
  50. goto force_reset;
  51. } else if (err) {
  52. return err;
  53. }
  54. /* Verify that DMA is no longer active */
  55. reg = fm10k_read_reg(hw, FM10K_DMA_CTRL);
  56. if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
  57. return FM10K_ERR_DMA_PENDING;
  58. force_reset:
  59. /* Initiate data path reset */
  60. reg = FM10K_DMA_CTRL_DATAPATH_RESET;
  61. fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
  62. /* Flush write and allow 100us for reset to complete */
  63. fm10k_write_flush(hw);
  64. udelay(FM10K_RESET_TIMEOUT);
  65. /* Verify we made it out of reset */
  66. reg = fm10k_read_reg(hw, FM10K_IP);
  67. if (!(reg & FM10K_IP_NOTINRESET))
  68. return FM10K_ERR_RESET_FAILED;
  69. return 0;
  70. }
  71. /**
  72. * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
  73. * @hw: pointer to hardware structure
  74. *
  75. * Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
  76. **/
  77. static bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
  78. {
  79. u16 sriov_ctrl = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_SRIOV_CTRL);
  80. return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
  81. }
  82. /**
  83. * fm10k_init_hw_pf - PF hardware initialization
  84. * @hw: pointer to hardware structure
  85. *
  86. **/
  87. static s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
  88. {
  89. u32 dma_ctrl, txqctl;
  90. u16 i;
  91. /* Establish default VSI as valid */
  92. fm10k_write_reg(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
  93. fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
  94. FM10K_DGLORTMAP_ANY);
  95. /* Invalidate all other GLORT entries */
  96. for (i = 1; i < FM10K_DGLORT_COUNT; i++)
  97. fm10k_write_reg(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);
  98. /* reset ITR2(0) to point to itself */
  99. fm10k_write_reg(hw, FM10K_ITR2(0), 0);
  100. /* reset VF ITR2(0) to point to 0 to avoid PF registers */
  101. fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);
  102. /* loop through all PF ITR2 registers pointing them to the previous */
  103. for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
  104. fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
  105. /* Enable interrupt moderator if not already enabled */
  106. fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
  107. /* compute the default txqctl configuration */
  108. txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
  109. (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);
  110. for (i = 0; i < FM10K_MAX_QUEUES; i++) {
  111. /* configure rings for 256 Queue / 32 Descriptor cache mode */
  112. fm10k_write_reg(hw, FM10K_TQDLOC(i),
  113. (i * FM10K_TQDLOC_BASE_32_DESC) |
  114. FM10K_TQDLOC_SIZE_32_DESC);
  115. fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);
  116. /* configure rings to provide TPH processing hints */
  117. fm10k_write_reg(hw, FM10K_TPH_TXCTRL(i),
  118. FM10K_TPH_TXCTRL_DESC_TPHEN |
  119. FM10K_TPH_TXCTRL_DESC_RROEN |
  120. FM10K_TPH_TXCTRL_DESC_WROEN |
  121. FM10K_TPH_TXCTRL_DATA_RROEN);
  122. fm10k_write_reg(hw, FM10K_TPH_RXCTRL(i),
  123. FM10K_TPH_RXCTRL_DESC_TPHEN |
  124. FM10K_TPH_RXCTRL_DESC_RROEN |
  125. FM10K_TPH_RXCTRL_DATA_WROEN |
  126. FM10K_TPH_RXCTRL_HDR_WROEN);
  127. }
  128. /* set max hold interval to align with 1.024 usec in all modes and
  129. * store ITR scale
  130. */
  131. switch (hw->bus.speed) {
  132. case fm10k_bus_speed_2500:
  133. dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
  134. hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1;
  135. break;
  136. case fm10k_bus_speed_5000:
  137. dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
  138. hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2;
  139. break;
  140. case fm10k_bus_speed_8000:
  141. dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
  142. hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
  143. break;
  144. default:
  145. dma_ctrl = 0;
  146. /* just in case, assume Gen3 ITR scale */
  147. hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
  148. break;
  149. }
  150. /* Configure TSO flags */
  151. fm10k_write_reg(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
  152. fm10k_write_reg(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);
  153. /* Enable DMA engine
  154. * Set Rx Descriptor size to 32
  155. * Set Minimum MSS to 64
  156. * Set Maximum number of Rx queues to 256 / 32 Descriptor
  157. */
  158. dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
  159. FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
  160. FM10K_DMA_CTRL_32_DESC;
  161. fm10k_write_reg(hw, FM10K_DMA_CTRL, dma_ctrl);
  162. /* record maximum queue count, we limit ourselves to 128 */
  163. hw->mac.max_queues = FM10K_MAX_QUEUES_PF;
  164. /* We support either 64 VFs or 7 VFs depending on whether we have ARI */
  165. hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;
  166. return 0;
  167. }
  168. /**
  169. * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
  170. * @hw: pointer to hardware structure
  171. * @vid: VLAN ID to add to table
  172. * @vsi: Index indicating VF ID or PF ID in table
  173. * @set: Indicates if this is a set or clear operation
  174. *
  175. * This function adds or removes the corresponding VLAN ID from the VLAN
  176. * filter table for the corresponding function. In addition to the
  177. * standard set/clear that supports one bit, a multi-bit write is
  178. * supported to set 64 bits at a time.
  179. **/
  180. static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
  181. {
  182. u32 vlan_table, reg, mask, bit, len;
  183. /* verify the VSI index is valid */
  184. if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
  185. return FM10K_ERR_PARAM;
  186. /* VLAN multi-bit write:
  187. * The multi-bit write has several parts to it.
  188. *                24              16               8               0
  189. *  7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
  190. * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  191. * | RSVD0 |         Length        |C|RSVD0|        VLAN ID        |
  192. * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  193. *
  194. * VLAN ID: VLAN starting value
  195. * RSVD0: Reserved section, must be 0
  196. * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
  197. * Length: Number of times to repeat the bit being set
  198. */
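/* Example (assuming the encoding described above): a request with a length
 * field of 63 and a starting VLAN ID of 100 walks the table below and updates
 * VIDs 100-163, i.e. 64 bits in a single call.
 */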
  199. len = vid >> 16;
  200. vid = (vid << 17) >> 17;
  201. /* verify the reserved 0 fields are 0 */
  202. if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
  203. return FM10K_ERR_PARAM;
  204. /* Loop through the table updating all required VLANs */
  205. for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
  206. len < FM10K_VLAN_TABLE_VID_MAX;
  207. len -= 32 - bit, reg++, bit = 0) {
  208. /* record the initial state of the register */
  209. vlan_table = fm10k_read_reg(hw, reg);
  210. /* truncate mask if we are at the start or end of the run */
  211. mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;
  212. /* make necessary modifications to the register */
  213. mask &= set ? ~vlan_table : vlan_table;
  214. if (mask)
  215. fm10k_write_reg(hw, reg, vlan_table ^ mask);
  216. }
  217. return 0;
  218. }
  219. /**
  220. * fm10k_read_mac_addr_pf - Read device MAC address
  221. * @hw: pointer to the HW structure
  222. *
  223. * Reads the device MAC address from the SM_AREA and stores the value.
  224. **/
  225. static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
  226. {
  227. u8 perm_addr[ETH_ALEN];
  228. u32 serial_num;
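/* The MAC address is spread across two SM_AREA words (layout inferred from
 * the shifts and checks below): SM_AREA(1) = mac[0]:mac[1]:mac[2]:0xFF and
 * SM_AREA(0) = 0xFF:mac[3]:mac[4]:mac[5]. The 0xFF filler bytes are validated
 * as a sanity check that the words were programmed.
 */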
  229. serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1));
  230. /* last byte should be all 1's */
  231. if ((~serial_num) << 24)
  232. return FM10K_ERR_INVALID_MAC_ADDR;
  233. perm_addr[0] = (u8)(serial_num >> 24);
  234. perm_addr[1] = (u8)(serial_num >> 16);
  235. perm_addr[2] = (u8)(serial_num >> 8);
  236. serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(0));
  237. /* first byte should be all 1's */
  238. if ((~serial_num) >> 24)
  239. return FM10K_ERR_INVALID_MAC_ADDR;
  240. perm_addr[3] = (u8)(serial_num >> 16);
  241. perm_addr[4] = (u8)(serial_num >> 8);
  242. perm_addr[5] = (u8)(serial_num);
  243. ether_addr_copy(hw->mac.perm_addr, perm_addr);
  244. ether_addr_copy(hw->mac.addr, perm_addr);
  245. return 0;
  246. }
  247. /**
  248. * fm10k_glort_valid_pf - Validate that the provided glort is valid
  249. * @hw: pointer to the HW structure
  250. * @glort: base glort to be validated
  251. *
  252. * This function will return an error if the provided glort is invalid
  253. **/
  254. bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
  255. {
  256. glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;
  257. return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
  258. }
  259. /**
  260. * fm10k_update_xc_addr_pf - Update device addresses
  261. * @hw: pointer to the HW structure
  262. * @glort: base resource tag for this request
  263. * @mac: MAC address to add/remove from table
  264. * @vid: VLAN ID to add/remove from table
  265. * @add: Indicates if this is an add or remove operation
  266. * @flags: flags field to indicate add and secure
  267. *
  268. * This function generates a message to the Switch API requesting
  269. * that the given logical port add/remove the given L2 MAC/VLAN address.
  270. **/
  271. static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
  272. const u8 *mac, u16 vid, bool add, u8 flags)
  273. {
  274. struct fm10k_mbx_info *mbx = &hw->mbx;
  275. struct fm10k_mac_update mac_update;
  276. u32 msg[5];
  277. /* clear set bit from VLAN ID */
  278. vid &= ~FM10K_VLAN_CLEAR;
  279. /* if glort or VLAN are not valid return error */
  280. if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
  281. return FM10K_ERR_PARAM;
  282. /* record fields */
  283. mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
  284. ((u32)mac[3] << 16) |
  285. ((u32)mac[4] << 8) |
  286. ((u32)mac[5]));
  287. mac_update.mac_upper = cpu_to_le16(((u16)mac[0] << 8) |
  288. ((u16)mac[1]));
  289. mac_update.vlan = cpu_to_le16(vid);
  290. mac_update.glort = cpu_to_le16(glort);
  291. mac_update.action = add ? 0 : 1;
  292. mac_update.flags = flags;
  293. /* populate mac_update fields */
  294. fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
  295. fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
  296. &mac_update, sizeof(mac_update));
  297. /* load onto outgoing mailbox */
  298. return mbx->ops.enqueue_tx(hw, mbx, msg);
  299. }
  300. /**
  301. * fm10k_update_uc_addr_pf - Update device unicast addresses
  302. * @hw: pointer to the HW structure
  303. * @glort: base resource tag for this request
  304. * @mac: MAC address to add/remove from table
  305. * @vid: VLAN ID to add/remove from table
  306. * @add: Indicates if this is an add or remove operation
  307. * @flags: flags field to indicate add and secure
  308. *
  309. * This function is used to add or remove unicast addresses for
  310. * the PF.
  311. **/
  312. static s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
  313. const u8 *mac, u16 vid, bool add, u8 flags)
  314. {
  315. /* verify MAC address is valid */
  316. if (!is_valid_ether_addr(mac))
  317. return FM10K_ERR_PARAM;
  318. return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
  319. }
  320. /**
  321. * fm10k_update_mc_addr_pf - Update device multicast addresses
  322. * @hw: pointer to the HW structure
  323. * @glort: base resource tag for this request
  324. * @mac: MAC address to add/remove from table
  325. * @vid: VLAN ID to add/remove from table
  326. * @add: Indicates if this is an add or remove operation
  327. *
  328. * This function is used to add or remove multicast MAC addresses for
  329. * the PF.
  330. **/
  331. static s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
  332. const u8 *mac, u16 vid, bool add)
  333. {
  334. /* verify multicast address is valid */
  335. if (!is_multicast_ether_addr(mac))
  336. return FM10K_ERR_PARAM;
  337. return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
  338. }
  339. /**
  340. * fm10k_update_xcast_mode_pf - Request update of multicast mode
  341. * @hw: pointer to hardware structure
  342. * @glort: base resource tag for this request
  343. * @mode: integer value indicating mode being requested
  344. *
  345. * This function will attempt to request a higher mode for the port
  346. * so that it can enable either multicast, multicast promiscuous, or
  347. * promiscuous mode of operation.
  348. **/
  349. static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
  350. {
  351. struct fm10k_mbx_info *mbx = &hw->mbx;
  352. u32 msg[3], xcast_mode;
  353. if (mode > FM10K_XCAST_MODE_NONE)
  354. return FM10K_ERR_PARAM;
  355. /* if glort is not valid return error */
  356. if (!fm10k_glort_valid_pf(hw, glort))
  357. return FM10K_ERR_PARAM;
  358. /* write xcast mode as a single u32 value,
  359. * lower 16 bits: glort
  360. * upper 16 bits: mode
  361. */
  362. xcast_mode = ((u32)mode << 16) | glort;
  363. /* generate message requesting to change xcast mode */
  364. fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
  365. fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);
  366. /* load onto outgoing mailbox */
  367. return mbx->ops.enqueue_tx(hw, mbx, msg);
  368. }
  369. /**
  370. * fm10k_update_int_moderator_pf - Update interrupt moderator linked list
  371. * @hw: pointer to hardware structure
  372. *
  373. * This function walks through the MSI-X vector table to determine the
  374. * number of active interrupts and based on that information updates the
  375. * interrupt moderator linked list.
  376. **/
  377. static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
  378. {
  379. u32 i;
  380. /* Disable interrupt moderator */
  381. fm10k_write_reg(hw, FM10K_INT_CTRL, 0);
  382. /* loop through PF from last to first looking for enabled vectors */
  383. for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
  384. if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
  385. break;
  386. }
  387. /* always reset VFITR2[0] to point to last enabled PF vector */
  388. fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
  389. /* reset ITR2[0] to point to last enabled PF vector */
  390. if (!hw->iov.num_vfs)
  391. fm10k_write_reg(hw, FM10K_ITR2(0), i);
  392. /* Enable interrupt moderator */
  393. fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
  394. }
  395. /**
  396. * fm10k_update_lport_state_pf - Notify the switch of a change in port state
  397. * @hw: pointer to the HW structure
  398. * @glort: base resource tag for this request
  399. * @count: number of logical ports being updated
  400. * @enable: boolean value indicating enable or disable
  401. *
  402. * This function is used to add/remove a logical port from the switch.
  403. **/
  404. static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
  405. u16 count, bool enable)
  406. {
  407. struct fm10k_mbx_info *mbx = &hw->mbx;
  408. u32 msg[3], lport_msg;
  409. /* do nothing if we are being asked to create or destroy 0 ports */
  410. if (!count)
  411. return 0;
  412. /* if glort is not valid return error */
  413. if (!fm10k_glort_valid_pf(hw, glort))
  414. return FM10K_ERR_PARAM;
  415. /* reset multicast mode if deleting lport */
  416. if (!enable)
  417. fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
  418. /* construct the lport message from the 2 pieces of data we have */
  419. lport_msg = ((u32)count << 16) | glort;
  420. /* generate lport create/delete message */
  421. fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
  422. FM10K_PF_MSG_ID_LPORT_DELETE);
  423. fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);
  424. /* load onto outgoing mailbox */
  425. return mbx->ops.enqueue_tx(hw, mbx, msg);
  426. }
  427. /**
  428. * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
  429. * @hw: pointer to hardware structure
  430. * @dglort: pointer to dglort configuration structure
  431. *
  432. * Reads the configuration structure contained in dglort_cfg and uses
  433. * that information to then populate a DGLORTMAP/DEC entry and the queues
  434. * to which it has been assigned.
  435. **/
  436. static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
  437. struct fm10k_dglort_cfg *dglort)
  438. {
  439. u16 glort, queue_count, vsi_count, pc_count;
  440. u16 vsi, queue, pc, q_idx;
  441. u32 txqctl, dglortdec, dglortmap;
  442. /* verify the dglort pointer */
  443. if (!dglort)
  444. return FM10K_ERR_PARAM;
  445. /* verify the dglort values */
  446. if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
  447. (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
  448. (dglort->queue_l > 8) || (dglort->queue_b >= 256))
  449. return FM10K_ERR_PARAM;
  450. /* determine count of VSIs and queues */
  451. queue_count = BIT(dglort->rss_l + dglort->pc_l);
  452. vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
  453. glort = dglort->glort;
  454. q_idx = dglort->queue_b;
  455. /* configure SGLORT for queues */
  456. for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
  457. for (queue = 0; queue < queue_count; queue++, q_idx++) {
  458. if (q_idx >= FM10K_MAX_QUEUES)
  459. break;
  460. fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort);
  461. fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort);
  462. }
  463. }
  464. /* determine count of PCs and queues */
  465. queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
  466. pc_count = BIT(dglort->pc_l);
  467. /* configure PC for Tx queues */
  468. for (pc = 0; pc < pc_count; pc++) {
  469. q_idx = pc + dglort->queue_b;
  470. for (queue = 0; queue < queue_count; queue++) {
  471. if (q_idx >= FM10K_MAX_QUEUES)
  472. break;
  473. txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx));
  474. txqctl &= ~FM10K_TXQCTL_PC_MASK;
  475. txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
  476. fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl);
  477. q_idx += pc_count;
  478. }
  479. }
  480. /* configure DGLORTDEC */
  481. dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
  482. ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
  483. ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
  484. ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
  485. ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
  486. ((u32)(dglort->queue_l));
  487. if (dglort->inner_rss)
  488. dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE;
  489. /* configure DGLORTMAP */
  490. dglortmap = (dglort->idx == fm10k_dglort_default) ?
  491. FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
  492. dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
  493. dglortmap |= dglort->glort;
  494. /* write values to hardware */
  495. fm10k_write_reg(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
  496. fm10k_write_reg(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);
  497. return 0;
  498. }
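/* Queues per pool scale down as the pool count grows: more than 32 pools
 * gives 2 queues per pool, more than 16 gives 4, more than 8 gives 8, and
 * otherwise FM10K_MAX_QUEUES_POOL is used.
 */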
  499. u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
  500. {
  501. u16 num_pools = hw->iov.num_pools;
  502. return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
  503. 8 : FM10K_MAX_QUEUES_POOL;
  504. }
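/* VF queue blocks are carved from the top of the queue space downward: VF
 * vf_idx starts at FM10K_MAX_QUEUES - queues_per_pool * (num_vfs - vf_idx).
 */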
  505. u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
  506. {
  507. u16 num_vfs = hw->iov.num_vfs;
  508. u16 vf_q_idx = FM10K_MAX_QUEUES;
  509. vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);
  510. return vf_q_idx;
  511. }
  512. static u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
  513. {
  514. u16 num_pools = hw->iov.num_pools;
  515. return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
  516. FM10K_MAX_VECTORS_POOL;
  517. }
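/* VF MSI-X vectors are stacked directly above the PF block: VF vf_idx starts
 * at FM10K_MAX_VECTORS_PF + vectors_per_pool * vf_idx.
 */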
  518. static u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
  519. {
  520. u16 vf_v_idx = FM10K_MAX_VECTORS_PF;
  521. vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;
  522. return vf_v_idx;
  523. }
  524. /**
  525. * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
  526. * @hw: pointer to the HW structure
  527. * @num_vfs: number of VFs to be allocated
  528. * @num_pools: number of virtualization pools to be allocated
  529. *
  530. * Allocates queues and traffic classes to virtualization entities to prepare
  531. * the PF for SR-IOV and VMDq
  532. **/
  533. static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
  534. u16 num_pools)
  535. {
  536. u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
  537. u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
  538. int i, j;
  539. /* hardware only supports up to 64 pools */
  540. if (num_pools > 64)
  541. return FM10K_ERR_PARAM;
  542. /* the number of VFs cannot exceed the number of pools */
  543. if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
  544. return FM10K_ERR_PARAM;
  545. /* record number of virtualization entities */
  546. hw->iov.num_vfs = num_vfs;
  547. hw->iov.num_pools = num_pools;
  548. /* determine qmap offsets and counts */
  549. qmap_stride = (num_vfs > 8) ? 32 : 256;
  550. qpp = fm10k_queues_per_pool(hw);
  551. vpp = fm10k_vectors_per_pool(hw);
  552. /* calculate starting index for queues */
  553. vf_q_idx = fm10k_vf_queue_index(hw, 0);
  554. qmap_idx = 0;
  555. /* establish TCs with -1 credits and no quanta to prevent transmit */
  556. for (i = 0; i < num_vfs; i++) {
  557. fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(i), 0);
  558. fm10k_write_reg(hw, FM10K_TC_RATE(i), 0);
  559. fm10k_write_reg(hw, FM10K_TC_CREDIT(i),
  560. FM10K_TC_CREDIT_CREDIT_MASK);
  561. }
  562. /* zero out all mbmem registers */
  563. for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
  564. fm10k_write_reg(hw, FM10K_MBMEM(i), 0);
  565. /* clear event notification of VF FLR */
  566. fm10k_write_reg(hw, FM10K_PFVFLREC(0), ~0);
  567. fm10k_write_reg(hw, FM10K_PFVFLREC(1), ~0);
  568. /* loop through unallocated rings assigning them back to PF */
  569. for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
  570. fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
  571. fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
  572. FM10K_TXQCTL_UNLIMITED_BW | vid);
  573. fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
  574. }
  575. /* PF should have already updated VFITR2[0] */
  576. /* update all ITR registers to flow to VFITR2[0] */
  577. for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
  578. if (!(i & (vpp - 1)))
  579. fm10k_write_reg(hw, FM10K_ITR2(i), i - vpp);
  580. else
  581. fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
  582. }
  583. /* update PF ITR2[0] to reference the last vector */
  584. fm10k_write_reg(hw, FM10K_ITR2(0),
  585. fm10k_vf_vector_index(hw, num_vfs - 1));
  586. /* loop through rings populating rings and TCs */
  587. for (i = 0; i < num_vfs; i++) {
  588. /* record index for VF queue 0 for use in end of loop */
  589. vf_q_idx0 = vf_q_idx;
  590. for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
  591. /* assign VF and locked TC to queues */
  592. fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
  593. fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx),
  594. (i << FM10K_TXQCTL_TC_SHIFT) | i |
  595. FM10K_TXQCTL_VF | vid);
  596. fm10k_write_reg(hw, FM10K_RXDCTL(vf_q_idx),
  597. FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
  598. FM10K_RXDCTL_DROP_ON_EMPTY);
  599. fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
  600. (i << FM10K_RXQCTL_VF_SHIFT) |
  601. FM10K_RXQCTL_VF);
  602. /* map queue pair to VF */
  603. fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
  604. fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
  605. }
  606. /* repeat the first ring for all of the remaining VF rings */
  607. for (; j < qmap_stride; j++, qmap_idx++) {
  608. fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
  609. fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
  610. }
  611. }
  612. /* loop through remaining indexes assigning all to queue 0 */
  613. while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
  614. fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
  615. fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), 0);
  616. qmap_idx++;
  617. }
  618. return 0;
  619. }
  620. /**
  621. * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
  622. * @hw: pointer to the HW structure
  623. * @vf_idx: index of VF receiving GLORT
  624. * @rate: Rate indicated in Mb/s
  625. *
  626. * Configures the TC for a given VF to allow only up to a given number
  627. * of Mb/s of outgoing Tx throughput.
  628. **/
  629. static s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
  630. {
  631. /* configure defaults */
  632. u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
  633. u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
  634. /* verify vf is in range */
  635. if (vf_idx >= hw->iov.num_vfs)
  636. return FM10K_ERR_PARAM;
  637. /* set interval to align with 4.096 usec in all modes */
  638. switch (hw->bus.speed) {
  639. case fm10k_bus_speed_2500:
  640. interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
  641. break;
  642. case fm10k_bus_speed_5000:
  643. interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
  644. break;
  645. default:
  646. break;
  647. }
  648. if (rate) {
  649. if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
  650. return FM10K_ERR_PARAM;
  651. /* The quanta is measured in Bytes per 4.096 or 8.192 usec
  652. * The rate is provided in Mbits per second
  653. * To translate from rate to quanta we need to multiply the
  654. * rate by 8.192 usec and divide by 8 bits/byte. To avoid
  655. * dealing with floating point we can round the values up
  656. * to the nearest whole number ratio which gives us 128 / 125.
  657. */
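/* e.g. a requested rate of 1000 Mb/s becomes 1000 * 128 / 125 = 1024 quanta
 * before the sub-4Gb/s adjustment below.
 */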
  658. tc_rate = (rate * 128) / 125;
  659. /* try to keep the rate limiting accurate by increasing
  660. * the number of credits and interval for rates less than 4Gb/s
  661. */
  662. if (rate < 4000)
  663. interval <<= 1;
  664. else
  665. tc_rate >>= 1;
  666. }
  667. /* update rate limiter with new values */
  668. fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
  669. fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
  670. fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
  671. return 0;
  672. }
  673. /**
  674. * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
  675. * @hw: pointer to the HW structure
  676. * @vf_idx: index of VF receiving GLORT
  677. *
  678. * Update the interrupt moderator linked list to include any MSI-X
  679. * interrupts which the VF has enabled in the MSI-X vector table.
  680. **/
  681. static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
  682. {
  683. u16 vf_v_idx, vf_v_limit, i;
  684. /* verify vf is in range */
  685. if (vf_idx >= hw->iov.num_vfs)
  686. return FM10K_ERR_PARAM;
  687. /* determine vector offset and count */
  688. vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
  689. vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
  690. /* search for first vector that is not masked */
  691. for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
  692. if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
  693. break;
  694. }
  695. /* reset linked list so it now includes our active vectors */
  696. if (vf_idx == (hw->iov.num_vfs - 1))
  697. fm10k_write_reg(hw, FM10K_ITR2(0), i);
  698. else
  699. fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), i);
  700. return 0;
  701. }
  702. /**
  703. * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
  704. * @hw: pointer to the HW structure
  705. * @vf_info: pointer to VF information structure
  706. *
  707. * Assign a MAC address and default VLAN to a VF and notify it of the update
  708. **/
  709. static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
  710. struct fm10k_vf_info *vf_info)
  711. {
  712. u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
  713. u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
  714. s32 err = 0;
  715. u16 vf_idx, vf_vid;
  716. /* verify vf is in range */
  717. if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
  718. return FM10K_ERR_PARAM;
  719. /* determine qmap offsets and counts */
  720. qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
  721. queues_per_pool = fm10k_queues_per_pool(hw);
  722. /* calculate starting index for queues */
  723. vf_idx = vf_info->vf_idx;
  724. vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
  725. qmap_idx = qmap_stride * vf_idx;
  726. /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
  727. * used here to indicate to the VF that it will not have privilege to
  728. * write VLAN_TABLE. All policy is enforced on the PF but this allows
  729. * the VF to correctly report errors to userspace requests.
  730. */
  731. if (vf_info->pf_vid)
  732. vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
  733. else
  734. vf_vid = vf_info->sw_vid;
  735. /* generate MAC_ADDR request */
  736. fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
  737. fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
  738. vf_info->mac, vf_vid);
  739. /* Configure Queue control register with new VLAN ID. The TXQCTL
  740. * register is RO from the VF, so the PF must do this even in the
  741. * case of notifying the VF of a new VID via the mailbox.
  742. */
  743. txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
  744. FM10K_TXQCTL_VID_MASK;
  745. txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
  746. FM10K_TXQCTL_VF | vf_idx;
  747. for (i = 0; i < queues_per_pool; i++)
  748. fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
  749. /* try loading a message onto outgoing mailbox first */
  750. if (vf_info->mbx.ops.enqueue_tx) {
  751. err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
  752. if (err != FM10K_MBX_ERR_NO_MBX)
  753. return err;
  754. err = 0;
  755. }
  756. /* If we aren't connected to a mailbox, this is most likely because
  757. * the VF driver is not running. It should thus be safe to re-map
  758. * queues and use the registers to pass the MAC address so that the VF
  759. * driver gets correct information during its initialization.
  760. */
  761. /* MAP Tx queue back to 0 temporarily, and disable it */
  762. fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
  763. fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
  764. /* verify the ring has been disabled before modifying base address registers */
  765. txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
  766. for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
  767. /* limit ourselves to a 1ms timeout */
  768. if (timeout == 10) {
  769. err = FM10K_ERR_DMA_PENDING;
  770. goto err_out;
  771. }
  772. usleep_range(100, 200);
  773. txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
  774. }
  775. /* Update base address registers to contain MAC address */
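/* Layout implied by the shifts below: TDBAH = 0xFF:mac[0]:mac[1]:mac[2] and
 * TDBAL = mac[3]:mac[4]:mac[5]:0x00 (bytes listed MSB to LSB).
 */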
  776. if (is_valid_ether_addr(vf_info->mac)) {
  777. tdbal = (((u32)vf_info->mac[3]) << 24) |
  778. (((u32)vf_info->mac[4]) << 16) |
  779. (((u32)vf_info->mac[5]) << 8);
  780. tdbah = (((u32)0xFF) << 24) |
  781. (((u32)vf_info->mac[0]) << 16) |
  782. (((u32)vf_info->mac[1]) << 8) |
  783. ((u32)vf_info->mac[2]);
  784. }
  785. /* Record the base address into queue 0 */
  786. fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal);
  787. fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah);
  788. /* Provide the VF the ITR scale, using software-defined fields in TDLEN
  789. * to pass the information during VF initialization. See definition of
  790. * FM10K_TDLEN_ITR_SCALE_SHIFT for more details.
  791. */
  792. fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale <<
  793. FM10K_TDLEN_ITR_SCALE_SHIFT);
  794. err_out:
  795. /* restore the queue back to VF ownership */
  796. fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
  797. return err;
  798. }
  799. /**
  800. * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
  801. * @hw: pointer to the HW structure
  802. * @vf_info: pointer to VF information structure
  803. *
  804. * Reassign the interrupts and queues to a VF following an FLR
  805. **/
  806. static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
  807. struct fm10k_vf_info *vf_info)
  808. {
  809. u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
  810. u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
  811. u16 vf_v_idx, vf_v_limit, vf_vid;
  812. u8 vf_idx = vf_info->vf_idx;
  813. int i;
  814. /* verify vf is in range */
  815. if (vf_idx >= hw->iov.num_vfs)
  816. return FM10K_ERR_PARAM;
  817. /* clear event notification of VF FLR */
  818. fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
  819. /* force timeout and then disconnect the mailbox */
  820. vf_info->mbx.timeout = 0;
  821. if (vf_info->mbx.ops.disconnect)
  822. vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
  823. /* determine vector offset and count */
  824. vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
  825. vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
  826. /* determine qmap offsets and counts */
  827. qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
  828. queues_per_pool = fm10k_queues_per_pool(hw);
  829. qmap_idx = qmap_stride * vf_idx;
  830. /* make all the queues inaccessible to the VF */
  831. for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
  832. fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
  833. fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
  834. }
  835. /* calculate starting index for queues */
  836. vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
  837. /* determine correct default VLAN ID */
  838. if (vf_info->pf_vid)
  839. vf_vid = vf_info->pf_vid;
  840. else
  841. vf_vid = vf_info->sw_vid;
  842. /* configure Queue control register */
  843. txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
  844. (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
  845. FM10K_TXQCTL_VF | vf_idx;
  846. rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
  847. /* stop further DMA and reset queue ownership back to VF */
  848. for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
  849. fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
  850. fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);
  851. fm10k_write_reg(hw, FM10K_RXDCTL(i),
  852. FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
  853. FM10K_RXDCTL_DROP_ON_EMPTY);
  854. fm10k_write_reg(hw, FM10K_RXQCTL(i), rxqctl);
  855. }
  856. /* reset TC with -1 credits and no quanta to prevent transmit */
  857. fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
  858. fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), 0);
  859. fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx),
  860. FM10K_TC_CREDIT_CREDIT_MASK);
  861. /* update our first entry in the table based on previous VF */
  862. if (!vf_idx)
  863. hw->mac.ops.update_int_moderator(hw);
  864. else
  865. hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);
  866. /* reset linked list so it now includes our active vectors */
  867. if (vf_idx == (hw->iov.num_vfs - 1))
  868. fm10k_write_reg(hw, FM10K_ITR2(0), vf_v_idx);
  869. else
  870. fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);
  871. /* link remaining vectors so that next points to previous */
  872. for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
  873. fm10k_write_reg(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);
  874. /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
  875. for (i = FM10K_VFMBMEM_LEN; i--;)
  876. fm10k_write_reg(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
  877. for (i = FM10K_VLAN_TABLE_SIZE; i--;)
  878. fm10k_write_reg(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
  879. for (i = FM10K_RETA_SIZE; i--;)
  880. fm10k_write_reg(hw, FM10K_RETA(vf_info->vsi, i), 0);
  881. for (i = FM10K_RSSRK_SIZE; i--;)
  882. fm10k_write_reg(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
  883. fm10k_write_reg(hw, FM10K_MRQC(vf_info->vsi), 0);
  884. /* Update base address registers to contain MAC address */
  885. if (is_valid_ether_addr(vf_info->mac)) {
  886. tdbal = (((u32)vf_info->mac[3]) << 24) |
  887. (((u32)vf_info->mac[4]) << 16) |
  888. (((u32)vf_info->mac[5]) << 8);
  889. tdbah = (((u32)0xFF) << 24) |
  890. (((u32)vf_info->mac[0]) << 16) |
  891. (((u32)vf_info->mac[1]) << 8) |
  892. ((u32)vf_info->mac[2]);
  893. }
  894. /* map queue pairs back to VF from last to first */
  895. for (i = queues_per_pool; i--;) {
  896. fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
  897. fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
  898. /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an
  899. * explanation of how TDLEN is used.
  900. */
  901. fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx + i),
  902. hw->mac.itr_scale <<
  903. FM10K_TDLEN_ITR_SCALE_SHIFT);
  904. fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
  905. fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
  906. }
  907. /* repeat the first ring for all the remaining VF rings */
  908. for (i = queues_per_pool; i < qmap_stride; i++) {
  909. fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
  910. fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
  911. }
  912. return 0;
  913. }
  914. /**
  915. * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
  916. * @hw: pointer to hardware structure
  917. * @vf_info: pointer to VF information structure
  918. * @lport_idx: Logical port offset from the hardware glort
  919. * @flags: Set of capability flags to extend port beyond basic functionality
  920. *
  921. * This function allows enabling a VF port by assigning it a GLORT and
  922. * setting the flags so that it can enable an Rx mode.
  923. **/
  924. static s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
  925. struct fm10k_vf_info *vf_info,
  926. u16 lport_idx, u8 flags)
  927. {
  928. u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;
  929. /* if glort is not valid return error */
  930. if (!fm10k_glort_valid_pf(hw, glort))
  931. return FM10K_ERR_PARAM;
  932. vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
  933. vf_info->glort = glort;
  934. return 0;
  935. }
  936. /**
  937. * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
  938. * @hw: pointer to hardware structure
  939. * @vf_info: pointer to VF information structure
  940. *
  941. * This function disables a VF port by stripping it of a GLORT and
  942. * setting the flags so that it cannot enable any Rx mode.
  943. **/
  944. static void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
  945. struct fm10k_vf_info *vf_info)
  946. {
  947. u32 msg[1];
  948. /* need to disable the port if it is already enabled */
  949. if (FM10K_VF_FLAG_ENABLED(vf_info)) {
  950. /* notify switch that this port has been disabled */
  951. fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);
  952. /* generate port state response to notify VF it is not ready */
  953. fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
  954. vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
  955. }
  956. /* clear flags and glort if it exists */
  957. vf_info->vf_flags = 0;
  958. vf_info->glort = 0;
  959. }
  960. /**
  961. * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
  962. * @hw: pointer to hardware structure
  963. * @q: stats for all queues of a VF
  964. * @vf_idx: index of VF
  965. *
  966. * This function collects queue stats for VFs.
  967. **/
  968. static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
  969. struct fm10k_hw_stats_q *q,
  970. u16 vf_idx)
  971. {
  972. u32 idx, qpp;
  973. /* get stats for all of the queues */
  974. qpp = fm10k_queues_per_pool(hw);
  975. idx = fm10k_vf_queue_index(hw, vf_idx);
  976. fm10k_update_hw_stats_q(hw, q, idx, qpp);
  977. }
  978. /**
  979. * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
  980. * @hw: Pointer to hardware structure
  981. * @results: Pointer array to message, results[0] is pointer to message
  982. * @mbx: Pointer to mailbox information structure
  983. *
  984. * This function is a default handler for MSI-X requests from the VF. The
  985. * assumption is that in this case it is acceptable to just directly
  986. * hand off the message from the VF to the underlying shared code.
  987. **/
  988. s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
  989. struct fm10k_mbx_info *mbx)
  990. {
  991. struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
  992. u8 vf_idx = vf_info->vf_idx;
  993. return hw->iov.ops.assign_int_moderator(hw, vf_idx);
  994. }
  995. /**
  996. * fm10k_iov_select_vid - Select correct default VLAN ID
  997. * @vf_info: pointer to VF information structure
  998. * @vid: VLAN ID to correct
  999. *
  1000. * Will report an error if the VLAN ID is out of range. For VID = 0, it will
  1001. * return either the pf_vid or sw_vid depending on which one is set.
  1002. */
  1003. s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
  1004. {
  1005. if (!vid)
  1006. return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
  1007. else if (vf_info->pf_vid && vid != vf_info->pf_vid)
  1008. return FM10K_ERR_PARAM;
  1009. else
  1010. return vid;
  1011. }
  1012. /**
  1013. * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
  1014. * @hw: Pointer to hardware structure
  1015. * @results: Pointer array to message, results[0] is pointer to message
  1016. * @mbx: Pointer to mailbox information structure
  1017. *
  1018. * This function is a default handler for MAC/VLAN requests from the VF.
  1019. * The assumption is that in this case it is acceptable to just directly
  1020. * hand off the message from the VF to the underlying shared code.
  1021. **/
  1022. s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
  1023. struct fm10k_mbx_info *mbx)
  1024. {
  1025. struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
  1026. u8 mac[ETH_ALEN];
  1027. u32 *result;
  1028. int err = 0;
  1029. bool set;
  1030. u16 vlan;
  1031. u32 vid;
  1032. /* we shouldn't be updating rules on a disabled interface */
  1033. if (!FM10K_VF_FLAG_ENABLED(vf_info))
  1034. err = FM10K_ERR_PARAM;
  1035. if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
  1036. result = results[FM10K_MAC_VLAN_MSG_VLAN];
  1037. /* record VLAN id requested */
  1038. err = fm10k_tlv_attr_get_u32(result, &vid);
  1039. if (err)
  1040. return err;
  1041. set = !(vid & FM10K_VLAN_CLEAR);
  1042. vid &= ~FM10K_VLAN_CLEAR;
  1043. /* if the length field has been set, this is a multi-bit
  1044. * update request. For multi-bit requests, simply disallow
  1045. * them when the pf_vid has been set. In this case, the PF
  1046. * should have already cleared the VLAN_TABLE, and if we
  1047. * allowed them, it could allow a rogue VF to receive traffic
  1048. * on a VLAN it was not assigned. In the single-bit case, we
  1049. * need to modify requests for VLAN 0 to use the default PF or
  1050. * SW vid when assigned.
  1051. */
  1052. if (vid >> 16) {
  1053. /* prevent multi-bit requests when PF has
  1054. * administratively set the VLAN for this VF
  1055. */
  1056. if (vf_info->pf_vid)
  1057. return FM10K_ERR_PARAM;
  1058. } else {
  1059. err = fm10k_iov_select_vid(vf_info, (u16)vid);
  1060. if (err < 0)
  1061. return err;
  1062. vid = err;
  1063. }
  1064. /* update VSI info for VF in regards to VLAN table */
  1065. err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
  1066. }
  1067. if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
  1068. result = results[FM10K_MAC_VLAN_MSG_MAC];
  1069. /* record unicast MAC address requested */
  1070. err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
  1071. if (err)
  1072. return err;
  1073. /* block attempts to set MAC for a locked device */
  1074. if (is_valid_ether_addr(vf_info->mac) &&
  1075. !ether_addr_equal(mac, vf_info->mac))
  1076. return FM10K_ERR_PARAM;
  1077. set = !(vlan & FM10K_VLAN_CLEAR);
  1078. vlan &= ~FM10K_VLAN_CLEAR;
  1079. err = fm10k_iov_select_vid(vf_info, vlan);
  1080. if (err < 0)
  1081. return err;
  1082. vlan = (u16)err;
  1083. /* notify switch of request for new unicast address */
  1084. err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
  1085. mac, vlan, set, 0);
  1086. }
  1087. if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
  1088. result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
  1089. /* record multicast MAC address requested */
  1090. err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
  1091. if (err)
  1092. return err;
  1093. /* verify that the VF is allowed to request multicast */
  1094. if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
  1095. return FM10K_ERR_PARAM;
  1096. set = !(vlan & FM10K_VLAN_CLEAR);
  1097. vlan &= ~FM10K_VLAN_CLEAR;
  1098. err = fm10k_iov_select_vid(vf_info, vlan);
  1099. if (err < 0)
  1100. return err;
  1101. vlan = (u16)err;
  1102. /* notify switch of request for new multicast address */
  1103. err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
  1104. mac, vlan, set);
  1105. }
  1106. return err;
  1107. }
  1108. /**
  1109. * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
  1110. * @vf_info: VF info structure containing capability flags
  1111. * @mode: Requested xcast mode
  1112. *
  1113. * This function outputs the mode that most closely matches the requested
  1114. * mode. If no modes match, it will request that we disable the port
  1115. **/
  1116. static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
  1117. u8 mode)
  1118. {
  1119. u8 vf_flags = vf_info->vf_flags;
  1120. /* match up mode to capabilities as best as possible */
  1121. switch (mode) {
  1122. case FM10K_XCAST_MODE_PROMISC:
  1123. if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
  1124. return FM10K_XCAST_MODE_PROMISC;
  1125. /* fall through */
  1126. case FM10K_XCAST_MODE_ALLMULTI:
  1127. if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
  1128. return FM10K_XCAST_MODE_ALLMULTI;
  1129. /* fall through */
  1130. case FM10K_XCAST_MODE_MULTI:
  1131. if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
  1132. return FM10K_XCAST_MODE_MULTI;
  1133. /* fall through */
  1134. case FM10K_XCAST_MODE_NONE:
  1135. if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
  1136. return FM10K_XCAST_MODE_NONE;
  1137. /* fall through */
  1138. default:
  1139. break;
  1140. }
  1141. /* disable interface as it should not be able to request any */
  1142. return FM10K_XCAST_MODE_DISABLE;
  1143. }
  1144. /**
  1145. * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
  1146. * @hw: Pointer to hardware structure
  1147. * @results: Pointer array to message, results[0] is pointer to message
  1148. * @mbx: Pointer to mailbox information structure
  1149. *
  1150. * This function is a default handler for port state requests. The port
  1151. * state requests for now are basic and consist of enabling or disabling
  1152. * the port.
  1153. **/
  1154. s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
  1155. struct fm10k_mbx_info *mbx)
  1156. {
  1157. struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
  1158. u32 *result;
  1159. s32 err = 0;
  1160. u32 msg[2];
  1161. u8 mode = 0;
  1162. /* verify VF is allowed to enable even minimal mode */
  1163. if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
  1164. return FM10K_ERR_PARAM;
  1165. if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
  1166. result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
  1167. /* XCAST mode update requested */
  1168. err = fm10k_tlv_attr_get_u8(result, &mode);
  1169. if (err)
  1170. return FM10K_ERR_PARAM;
  1171. /* prep for possible demotion depending on capabilities */
  1172. mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
  1173. /* if mode is not currently enabled, enable it */
  1174. if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
  1175. fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
  1176. /* swap mode back to a bit flag */
  1177. mode = FM10K_VF_FLAG_SET_MODE(mode);
  1178. } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
		/* need to disable the port if it is already enabled */
		if (FM10K_VF_FLAG_ENABLED(vf_info))
			err = fm10k_update_lport_state_pf(hw, vf_info->glort,
							  1, false);

		/* we need to clear VF_FLAG_ENABLED flags in order to ensure
		 * that we actually re-enable the LPORT state below. Note that
		 * this has no impact if the VF is already disabled, as the
		 * flags are already cleared.
		 */
		if (!err)
			vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);

		/* when enabling the port we should reset the rate limiters */
		hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);

		/* set mode for minimal functionality */
		mode = FM10K_VF_FLAG_SET_MODE_NONE;

		/* generate port state response to notify VF it is ready */
		fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
		fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
		mbx->ops.enqueue_tx(hw, mbx, msg);
	}

	/* if enable state toggled note the update */
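	/* comparing the negated values is a logical XOR: only touch the
	 * LPORT state when the enabled flag and the requested mode disagree
	 */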
	if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
		err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
						  !!mode);

	/* if state change succeeded, then update our stored state */
	mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
	if (!err)
		vf_info->vf_flags = mode;

	return err;
}

/**
 * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
 * @hw: pointer to hardware structure
 * @stats: pointer to the stats structure to update
 *
 * This function collects and aggregates global and per queue hardware
 * statistics.
 **/
static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
				     struct fm10k_hw_stats *stats)
{
	u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
	u32 id, id_prev;

	/* Use Tx queue 0 as a canary to detect a reset */
	id = fm10k_read_reg(hw, FM10K_TXQCTL(0));

	/* Read Global Statistics */
	do {
		timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
						  &stats->timeout);
		ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
		ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
		um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
		xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
		vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
						    &stats->vlan_drop);
		loopback_drop =
			fm10k_read_hw_stats_32b(hw,
						FM10K_STATS_LOOPBACK_DROP,
						&stats->loopback_drop);
		nodesc_drop = fm10k_read_hw_stats_32b(hw,
						      FM10K_STATS_NODESC_DROP,
						      &stats->nodesc_drop);

		/* if value has not changed then we have consistent data */
		id_prev = id;
		id = fm10k_read_reg(hw, FM10K_TXQCTL(0));
	} while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);

	/* drop non-ID bits and set VALID ID bit */
	id &= FM10K_TXQCTL_ID_MASK;
	id |= FM10K_STAT_VALID;

	/* Update Global Statistics */
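	/* only fold the deltas into the running counts if the canary ID
	 * matches the one recorded on the previous read; a mismatch means a
	 * reset occurred and the deltas are stale
	 */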
	if (stats->stats_idx == id) {
		stats->timeout.count += timeout;
		stats->ur.count += ur;
		stats->ca.count += ca;
		stats->um.count += um;
		stats->xec.count += xec;
		stats->vlan_drop.count += vlan_drop;
		stats->loopback_drop.count += loopback_drop;
		stats->nodesc_drop.count += nodesc_drop;
	}

	/* Update bases and record current PF id */
	fm10k_update_hw_base_32b(&stats->timeout, timeout);
	fm10k_update_hw_base_32b(&stats->ur, ur);
	fm10k_update_hw_base_32b(&stats->ca, ca);
	fm10k_update_hw_base_32b(&stats->um, um);
	fm10k_update_hw_base_32b(&stats->xec, xec);
	fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
	fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
	fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
	stats->stats_idx = id;

	/* Update Queue Statistics */
	fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
}

/**
 * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
 * @hw: pointer to hardware structure
 * @stats: pointer to the stats structure to update
 *
 * This function resets the base for global and per queue hardware
 * statistics.
 **/
static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
				     struct fm10k_hw_stats *stats)
{
	/* Unbind Global Statistics */
	fm10k_unbind_hw_stats_32b(&stats->timeout);
	fm10k_unbind_hw_stats_32b(&stats->ur);
	fm10k_unbind_hw_stats_32b(&stats->ca);
	fm10k_unbind_hw_stats_32b(&stats->um);
	fm10k_unbind_hw_stats_32b(&stats->xec);
	fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
	fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
	fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);

	/* Unbind Queue Statistics */
	fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);

	/* Reinitialize bases for all stats */
	fm10k_update_hw_stats_pf(hw, stats);
}

/**
 * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
 * @hw: pointer to hardware structure
 * @dma_mask: 64 bit DMA mask required for platform
 *
 * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
 * to limit the access to memory beyond what is physically in the system.
 **/
static void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
{
	/* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
	u32 phyaddr = (u32)(dma_mask >> 32);

	fm10k_write_reg(hw, FM10K_PHYADDR, phyaddr);
}

/**
 * fm10k_get_fault_pf - Record a fault in one of the interface units
 * @hw: pointer to hardware structure
 * @type: fault type register offset
 * @fault: pointer to memory location to record the fault
 *
 * Record the fault register contents to the fault data structure and
 * clear the entry from the register.
 *
 * Returns ERR_PARAM if invalid register is specified or no error is present.
 **/
static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
			      struct fm10k_fault *fault)
{
	u32 func;

	/* verify the fault register is in range and is aligned */
	switch (type) {
	case FM10K_PCA_FAULT:
	case FM10K_THI_FAULT:
	case FM10K_FUM_FAULT:
		break;
	default:
		return FM10K_ERR_PARAM;
	}

	/* only service faults that are valid */
	func = fm10k_read_reg(hw, type + FM10K_FAULT_FUNC);
	if (!(func & FM10K_FAULT_FUNC_VALID))
		return FM10K_ERR_PARAM;

	/* read remaining fields */
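	/* the 64-bit fault address is split across the HI and LO registers */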
	fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI);
	fault->address <<= 32;
	fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
	fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO);

	/* clear valid bit to allow for next error */
	fm10k_write_reg(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);

	/* Record which function triggered the error */
	if (func & FM10K_FAULT_FUNC_PF)
		fault->func = 0;
	else
		fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
				   FM10K_FAULT_FUNC_VF_SHIFT);

	/* record fault type */
	fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;

	return 0;
}

/**
 * fm10k_request_lport_map_pf - Request LPORT map from the switch API
 * @hw: pointer to hardware structure
 *
 * This function places an LPORT map request onto the outgoing mailbox so
 * that the switch API will reply with the glort and mask assigned to the PF.
 **/
static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[1];

	/* issue request asking for LPORT map */
	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}

/**
 * fm10k_get_host_state_pf - Returns the state of the switch and mailbox
 * @hw: pointer to hardware structure
 * @switch_ready: pointer to boolean value that will record switch state
 *
 * This function will check the DMA_CTRL2 register and mailbox in order
 * to determine if the switch is ready for the PF to begin requesting
 * addresses and mapping traffic to the local interface.
 **/
static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
{
	u32 dma_ctrl2;

	/* verify the switch is ready for interaction */
	dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
	if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
		return 0;

	/* retrieve generic host state info */
	return fm10k_get_host_state_generic(hw, switch_ready);
}

/* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
				 sizeof(struct fm10k_swapi_error)),
	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
	FM10K_TLV_ATTR_LAST
};

/**
 * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler configures the lport mapping based on the reply from the
 * switch API.
 **/
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	u16 glort, mask;
	u32 dglort_map;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
				     &dglort_map);
	if (err)
		return err;

	/* extract values out of the header */
	glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
	mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);

	/* verify mask is set and none of the masked bits in glort are set */
	if (!mask || (glort & ~mask))
		return FM10K_ERR_PARAM;

	/* verify the mask is contiguous, and that it is 1's followed by 0's */
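	/* ~(mask - 1) & mask isolates the lowest set bit of the mask; for a
	 * mask made of 1's followed by 0's, adding that bit back to the mask
	 * carries cleanly out of the low 16 bits
	 */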
	if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
		return FM10K_ERR_PARAM;

	/* record the glort, mask, and port count */
	hw->mac.dglort_map = dglort_map;

	return 0;
}

const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
	FM10K_TLV_ATTR_LAST
};

/**
 * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler configures the default VLAN for the PF
 **/
static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
				    struct fm10k_mbx_info *mbx)
{
	u16 glort, pvid;
	u32 pvid_update;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
				     &pvid_update);
	if (err)
		return err;

	/* extract values from the pvid update */
	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* verify VLAN ID is valid */
	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* record the port VLAN ID value */
	hw->mac.default_vid = pvid;

	return 0;
}

/**
 * fm10k_record_global_table_data - Move global table data to swapi table info
 * @from: pointer to source table data structure
 * @to: pointer to destination table info structure
 *
 * This function will copy table_data to the table_info contained in
 * the hw struct.
 **/
static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
					   struct fm10k_swapi_table_info *to)
{
	/* convert from le32 struct to CPU byte ordered values */
	to->used = le32_to_cpu(from->used);
	to->avail = le32_to_cpu(from->avail);
}

const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
				 sizeof(struct fm10k_swapi_error)),
	FM10K_TLV_ATTR_LAST
};

/**
 * fm10k_msg_err_pf - Message handler for error reply
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler will capture the data for any error replies to previous
 * messages that the PF has sent.
 **/
s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
		     struct fm10k_mbx_info *mbx)
{
	struct fm10k_swapi_error err_msg;
	s32 err;

	/* extract structure from message */
	err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
					   &err_msg, sizeof(err_msg));
	if (err)
		return err;

	/* record table status */
	fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
	fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
	fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);

	/* record SW API status value */
	hw->swapi.status = le32_to_cpu(err_msg.status);

	return 0;
}

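/* message handlers registered on the PF mailbox by fm10k_get_invariants_pf()
 * below
 */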
static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

static const struct fm10k_mac_ops mac_ops_pf = {
	.get_bus_info = fm10k_get_bus_info_generic,
	.reset_hw = fm10k_reset_hw_pf,
	.init_hw = fm10k_init_hw_pf,
	.start_hw = fm10k_start_hw_generic,
	.stop_hw = fm10k_stop_hw_generic,
	.update_vlan = fm10k_update_vlan_pf,
	.read_mac_addr = fm10k_read_mac_addr_pf,
	.update_uc_addr = fm10k_update_uc_addr_pf,
	.update_mc_addr = fm10k_update_mc_addr_pf,
	.update_xcast_mode = fm10k_update_xcast_mode_pf,
	.update_int_moderator = fm10k_update_int_moderator_pf,
	.update_lport_state = fm10k_update_lport_state_pf,
	.update_hw_stats = fm10k_update_hw_stats_pf,
	.rebind_hw_stats = fm10k_rebind_hw_stats_pf,
	.configure_dglort_map = fm10k_configure_dglort_map_pf,
	.set_dma_mask = fm10k_set_dma_mask_pf,
	.get_fault = fm10k_get_fault_pf,
	.get_host_state = fm10k_get_host_state_pf,
	.request_lport_map = fm10k_request_lport_map_pf,
};

static const struct fm10k_iov_ops iov_ops_pf = {
	.assign_resources = fm10k_iov_assign_resources_pf,
	.configure_tc = fm10k_iov_configure_tc_pf,
	.assign_int_moderator = fm10k_iov_assign_int_moderator_pf,
	.assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf,
	.reset_resources = fm10k_iov_reset_resources_pf,
	.set_lport = fm10k_iov_set_lport_pf,
	.reset_lport = fm10k_iov_reset_lport_pf,
	.update_stats = fm10k_iov_update_stats_pf,
};

static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
{
	fm10k_get_invariants_generic(hw);

	return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
}

const struct fm10k_info fm10k_pf_info = {
	.mac = fm10k_mac_pf,
	.get_invariants = fm10k_get_invariants_pf,
	.mac_ops = &mac_ops_pf,
	.iov_ops = &iov_ops_pf,
};