fm10k_pf.c

/* Intel(R) Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k_pf.h"
#include "fm10k_vf.h"

/**
 * fm10k_reset_hw_pf - PF hardware reset
 * @hw: pointer to hardware structure
 *
 * This function should return the hardware to a state similar to the
 * one it is in after being powered on.
 **/
static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
{
	s32 err;
	u32 reg;
	u16 i;

	/* Disable interrupts */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));

	/* Lock ITR2 reg 0 into itself and disable interrupt moderation */
	fm10k_write_reg(hw, FM10K_ITR2(0), 0);
	fm10k_write_reg(hw, FM10K_INT_CTRL, 0);

	/* We assume here Tx and Rx queue 0 are owned by the PF */

	/* Shut off VF access to their queues forcing them to queue 0 */
	for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
		fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
		fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
	}

	/* shut down all rings */
	err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
	if (err == FM10K_ERR_REQUESTS_PENDING) {
		hw->mac.reset_while_pending++;
		goto force_reset;
	} else if (err) {
		return err;
	}

	/* Verify that DMA is no longer active */
	reg = fm10k_read_reg(hw, FM10K_DMA_CTRL);
	if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
		return FM10K_ERR_DMA_PENDING;

force_reset:
	/* Initiate data path reset */
	reg = FM10K_DMA_CTRL_DATAPATH_RESET;
	fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);

	/* Flush write and allow 100us for reset to complete */
	fm10k_write_flush(hw);
	udelay(FM10K_RESET_TIMEOUT);

	/* Verify we made it out of reset */
	reg = fm10k_read_reg(hw, FM10K_IP);
	if (!(reg & FM10K_IP_NOTINRESET))
		return FM10K_ERR_RESET_FAILED;

	return 0;
}

/**
 * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
 * @hw: pointer to hardware structure
 *
 * Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
 **/
static bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
{
	u16 sriov_ctrl = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_SRIOV_CTRL);

	return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
}

/**
 * fm10k_init_hw_pf - PF hardware initialization
 * @hw: pointer to hardware structure
 *
 **/
static s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
{
	u32 dma_ctrl, txqctl;
	u16 i;

	/* Establish default VSI as valid */
	fm10k_write_reg(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
			FM10K_DGLORTMAP_ANY);

	/* Invalidate all other GLORT entries */
	for (i = 1; i < FM10K_DGLORT_COUNT; i++)
		fm10k_write_reg(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);

	/* reset ITR2(0) to point to itself */
	fm10k_write_reg(hw, FM10K_ITR2(0), 0);
	/* reset VF ITR2(0) to point to 0 to avoid the PF ITR registers */
	fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);

	/* loop through all PF ITR2 registers pointing them to the previous */
	for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
		fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
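
	/* The PF ITR2 registers now form the linked list walked by the
	 * interrupt moderator: each entry points at the previous vector and
	 * the chain terminates at ITR2(0), which points at itself.
	 */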
	/* Enable interrupt moderator if not already enabled */
	fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);

	/* compute the default txqctl configuration */
	txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
		 (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);

	for (i = 0; i < FM10K_MAX_QUEUES; i++) {
		/* configure rings for 256 Queue / 32 Descriptor cache mode */
		fm10k_write_reg(hw, FM10K_TQDLOC(i),
				(i * FM10K_TQDLOC_BASE_32_DESC) |
				FM10K_TQDLOC_SIZE_32_DESC);
		fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);

		/* configure rings to provide TPH processing hints */
		fm10k_write_reg(hw, FM10K_TPH_TXCTRL(i),
				FM10K_TPH_TXCTRL_DESC_TPHEN |
				FM10K_TPH_TXCTRL_DESC_RROEN |
				FM10K_TPH_TXCTRL_DESC_WROEN |
				FM10K_TPH_TXCTRL_DATA_RROEN);
		fm10k_write_reg(hw, FM10K_TPH_RXCTRL(i),
				FM10K_TPH_RXCTRL_DESC_TPHEN |
				FM10K_TPH_RXCTRL_DESC_RROEN |
				FM10K_TPH_RXCTRL_DATA_WROEN |
				FM10K_TPH_RXCTRL_HDR_WROEN);
	}

	/* set max hold interval to align with 1.024 usec in all modes and
	 * store ITR scale
	 */
	switch (hw->bus.speed) {
	case fm10k_bus_speed_2500:
		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1;
		break;
	case fm10k_bus_speed_5000:
		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2;
		break;
	case fm10k_bus_speed_8000:
		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
		break;
	default:
		dma_ctrl = 0;
		/* just in case, assume Gen3 ITR scale */
		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
		break;
	}

	/* Configure TSO flags */
	fm10k_write_reg(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
	fm10k_write_reg(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);

	/* Enable DMA engine
	 * Set Rx Descriptor size to 32
	 * Set Minimum MSS to 64
	 * Set Maximum number of Rx queues to 256 / 32 Descriptor
	 */
	dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
		    FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
		    FM10K_DMA_CTRL_32_DESC;
	fm10k_write_reg(hw, FM10K_DMA_CTRL, dma_ctrl);

	/* record maximum queue count, we limit ourselves to 128 */
	hw->mac.max_queues = FM10K_MAX_QUEUES_PF;

	/* We support either 64 VFs or 7 VFs depending on if we have ARI */
	hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;

	return 0;
}

/**
 * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
 * @hw: pointer to hardware structure
 * @vid: VLAN ID to add to table
 * @vsi: Index indicating VF ID or PF ID in table
 * @set: Indicates if this is a set or clear operation
 *
 * This function adds or removes the corresponding VLAN ID from the VLAN
 * filter table for the corresponding function. In addition to the
 * standard set/clear that supports one bit, a multi-bit write is
 * supported to set 64 bits at a time.
 **/
static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
{
	u32 vlan_table, reg, mask, bit, len;

	/* verify the VSI index is valid */
	if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
		return FM10K_ERR_PARAM;

	/* VLAN multi-bit write:
	 * The multi-bit write has several parts to it.
	 *
	 *              24              16               8               0
	 *  7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	 * |     RSVD0     |    Length     |C|RSVD0|        VLAN ID        |
	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	 *
	 * VLAN ID: Vlan Starting value
	 * RSVD0: Reserved section, must be 0
	 * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
	 * Length: Number of times to repeat the bit being set
	 */
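	/* For example, a length of 0 updates only the starting VLAN ID,
	 * while a length of 63 updates 64 consecutive VLAN IDs beginning
	 * at that value.
	 */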
	len = vid >> 16;
	vid = (vid << 17) >> 17;

	/* verify the reserved 0 fields are 0 */
	if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* Loop through the table updating all required VLANs */
	for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
	     len < FM10K_VLAN_TABLE_VID_MAX;
	     len -= 32 - bit, reg++, bit = 0) {
		/* record the initial state of the register */
		vlan_table = fm10k_read_reg(hw, reg);

		/* truncate mask if we are at the start or end of the run */
		mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;

		/* make necessary modifications to the register */
		mask &= set ? ~vlan_table : vlan_table;
		if (mask)
			fm10k_write_reg(hw, reg, vlan_table ^ mask);
	}

	return 0;
}

/**
 * fm10k_read_mac_addr_pf - Read device MAC address
 * @hw: pointer to the HW structure
 *
 * Reads the device MAC address from the SM_AREA and stores the value.
 **/
static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
{
	u8 perm_addr[ETH_ALEN];
	u32 serial_num;

	serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1));

	/* last byte should be all 1's */
	if ((~serial_num) << 24)
		return FM10K_ERR_INVALID_MAC_ADDR;

	perm_addr[0] = (u8)(serial_num >> 24);
	perm_addr[1] = (u8)(serial_num >> 16);
	perm_addr[2] = (u8)(serial_num >> 8);

	serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(0));

	/* first byte should be all 1's */
	if ((~serial_num) >> 24)
		return FM10K_ERR_INVALID_MAC_ADDR;

	perm_addr[3] = (u8)(serial_num >> 16);
	perm_addr[4] = (u8)(serial_num >> 8);
	perm_addr[5] = (u8)(serial_num);

	ether_addr_copy(hw->mac.perm_addr, perm_addr);
	ether_addr_copy(hw->mac.addr, perm_addr);

	return 0;
}

/**
 * fm10k_glort_valid_pf - Validate that the provided glort is valid
 * @hw: pointer to the HW structure
 * @glort: base glort to be validated
 *
 * This function will return false if the provided glort is invalid
 **/
bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
{
	glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;

	return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
}

/**
 * fm10k_update_xc_addr_pf - Update device addresses
 * @hw: pointer to the HW structure
 * @glort: base resource tag for this request
 * @mac: MAC address to add/remove from table
 * @vid: VLAN ID to add/remove from table
 * @add: Indicates if this is an add or remove operation
 * @flags: flags field to indicate add and secure
 *
 * This function generates a message to the Switch API requesting
 * that the given logical port add/remove the given L2 MAC/VLAN address.
 **/
static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
				   const u8 *mac, u16 vid, bool add, u8 flags)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	struct fm10k_mac_update mac_update;
	u32 msg[5];

	/* clear set bit from VLAN ID */
	vid &= ~FM10K_VLAN_CLEAR;

	/* if glort or VLAN are not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* record fields */
	mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
					   ((u32)mac[3] << 16) |
					   ((u32)mac[4] << 8) |
					   ((u32)mac[5]));
	mac_update.mac_upper = cpu_to_le16(((u16)mac[0] << 8) |
					   ((u16)mac[1]));
	mac_update.vlan = cpu_to_le16(vid);
	mac_update.glort = cpu_to_le16(glort);
	mac_update.action = add ? 0 : 1;
	mac_update.flags = flags;

	/* populate mac_update fields */
	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
	fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
				     &mac_update, sizeof(mac_update));

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}

/**
 * fm10k_update_uc_addr_pf - Update device unicast addresses
 * @hw: pointer to the HW structure
 * @glort: base resource tag for this request
 * @mac: MAC address to add/remove from table
 * @vid: VLAN ID to add/remove from table
 * @add: Indicates if this is an add or remove operation
 * @flags: flags field to indicate add and secure
 *
 * This function is used to add or remove unicast addresses for
 * the PF.
 **/
static s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
				   const u8 *mac, u16 vid, bool add, u8 flags)
{
	/* verify MAC address is valid */
	if (!is_valid_ether_addr(mac))
		return FM10K_ERR_PARAM;

	return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
}

/**
 * fm10k_update_mc_addr_pf - Update device multicast addresses
 * @hw: pointer to the HW structure
 * @glort: base resource tag for this request
 * @mac: MAC address to add/remove from table
 * @vid: VLAN ID to add/remove from table
 * @add: Indicates if this is an add or remove operation
 *
 * This function is used to add or remove multicast MAC addresses for
 * the PF.
 **/
static s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
				   const u8 *mac, u16 vid, bool add)
{
	/* verify multicast address is valid */
	if (!is_multicast_ether_addr(mac))
		return FM10K_ERR_PARAM;

	return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
}

/**
 * fm10k_update_xcast_mode_pf - Request update of multicast mode
 * @hw: pointer to hardware structure
 * @glort: base resource tag for this request
 * @mode: integer value indicating mode being requested
 *
 * This function will attempt to request a higher mode for the port
 * so that it can enable either multicast, multicast promiscuous, or
 * promiscuous mode of operation.
 **/
static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[3], xcast_mode;

	if (mode > FM10K_XCAST_MODE_NONE)
		return FM10K_ERR_PARAM;

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* write xcast mode as a single u32 value,
	 * lower 16 bits: glort
	 * upper 16 bits: mode
	 */
	xcast_mode = ((u32)mode << 16) | glort;

	/* generate message requesting to change xcast mode */
	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
	fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}

/**
 * fm10k_update_int_moderator_pf - Update interrupt moderator linked list
 * @hw: pointer to hardware structure
 *
 * This function walks through the MSI-X vector table to determine the
 * number of active interrupts and based on that information updates the
 * interrupt moderator linked list.
 **/
static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
{
	u32 i;

	/* Disable interrupt moderator */
	fm10k_write_reg(hw, FM10K_INT_CTRL, 0);
	/* loop through PF from last to first looking for enabled vectors */
	for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
		if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
			break;
	}

	/* always reset VFITR2[0] to point to last enabled PF vector */
	fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);

	/* reset ITR2[0] to point to last enabled PF vector */
	if (!hw->iov.num_vfs)
		fm10k_write_reg(hw, FM10K_ITR2(0), i);

	/* Enable interrupt moderator */
	fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
}

/**
 * fm10k_update_lport_state_pf - Notify the switch of a change in port state
 * @hw: pointer to the HW structure
 * @glort: base resource tag for this request
 * @count: number of logical ports being updated
 * @enable: boolean value indicating enable or disable
 *
 * This function is used to add/remove a logical port from the switch.
 **/
static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
				       u16 count, bool enable)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[3], lport_msg;

	/* do nothing if we are being asked to create or destroy 0 ports */
	if (!count)
		return 0;

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* reset multicast mode if deleting lport */
	if (!enable)
		fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);

	/* construct the lport message from the 2 pieces of data we have */
	lport_msg = ((u32)count << 16) | glort;

	/* generate lport create/delete message */
	fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
					 FM10K_PF_MSG_ID_LPORT_DELETE);
	fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}

/**
 * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
 * @hw: pointer to hardware structure
 * @dglort: pointer to dglort configuration structure
 *
 * Reads the configuration structure contained in dglort_cfg and uses
 * that information to then populate a DGLORTMAP/DEC entry and the queues
 * to which it has been assigned.
 **/
static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
					 struct fm10k_dglort_cfg *dglort)
{
	u16 glort, queue_count, vsi_count, pc_count;
	u16 vsi, queue, pc, q_idx;
	u32 txqctl, dglortdec, dglortmap;

	/* verify the dglort pointer */
	if (!dglort)
		return FM10K_ERR_PARAM;

	/* verify the dglort values */
	if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
	    (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
	    (dglort->queue_l > 8) || (dglort->queue_b >= 256))
		return FM10K_ERR_PARAM;

	/* determine count of VSIs and queues */
	queue_count = BIT(dglort->rss_l + dglort->pc_l);
	vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
	glort = dglort->glort;
	q_idx = dglort->queue_b;

	/* configure SGLORT for queues */
	for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
		for (queue = 0; queue < queue_count; queue++, q_idx++) {
			if (q_idx >= FM10K_MAX_QUEUES)
				break;

			fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort);
			fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort);
		}
	}

	/* determine count of PCs and queues */
	queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
	pc_count = BIT(dglort->pc_l);

	/* configure PC for Tx queues */
	for (pc = 0; pc < pc_count; pc++) {
		q_idx = pc + dglort->queue_b;
		for (queue = 0; queue < queue_count; queue++) {
			if (q_idx >= FM10K_MAX_QUEUES)
				break;

			txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx));
			txqctl &= ~FM10K_TXQCTL_PC_MASK;
			txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
			fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl);

			q_idx += pc_count;
		}
	}

	/* configure DGLORTDEC */
	dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
		    ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
		    ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
		    ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
		    ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
		    ((u32)(dglort->queue_l));
	if (dglort->inner_rss)
		dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE;

	/* configure DGLORTMAP */
	dglortmap = (dglort->idx == fm10k_dglort_default) ?
			FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
	dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
	dglortmap |= dglort->glort;

	/* write values to hardware */
	fm10k_write_reg(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
	fm10k_write_reg(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);

	return 0;
}
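
/* Queues are split evenly among the virtualization pools: more than 32
 * pools get 2 queues each, 17-32 pools get 4, 9-16 pools get 8, and 8 or
 * fewer pools get the full FM10K_MAX_QUEUES_POOL allocation.
 */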
u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
{
	u16 num_pools = hw->iov.num_pools;

	return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
	       8 : FM10K_MAX_QUEUES_POOL;
}
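
/* VF queue pools are carved from the top of the queue space downward, so
 * the last VF's pool ends at FM10K_MAX_QUEUES and VF vf_idx begins
 * (num_vfs - vf_idx) pools below that boundary.
 */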
u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
{
	u16 num_vfs = hw->iov.num_vfs;
	u16 vf_q_idx = FM10K_MAX_QUEUES;

	vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);

	return vf_q_idx;
}

static u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
{
	u16 num_pools = hw->iov.num_pools;

	return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
	       FM10K_MAX_VECTORS_POOL;
}

static u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
{
	u16 vf_v_idx = FM10K_MAX_VECTORS_PF;

	vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;

	return vf_v_idx;
}

/**
 * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
 * @hw: pointer to the HW structure
 * @num_vfs: number of VFs to be allocated
 * @num_pools: number of virtualization pools to be allocated
 *
 * Allocates queues and traffic classes to virtualization entities to prepare
 * the PF for SR-IOV and VMDq
 **/
static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
					 u16 num_pools)
{
	u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
	u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
	int i, j;

	/* hardware only supports up to 64 pools */
	if (num_pools > 64)
		return FM10K_ERR_PARAM;

	/* the number of VFs cannot exceed the number of pools */
	if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
		return FM10K_ERR_PARAM;

	/* record number of virtualization entities */
	hw->iov.num_vfs = num_vfs;
	hw->iov.num_pools = num_pools;

	/* determine qmap offsets and counts */
	qmap_stride = (num_vfs > 8) ? 32 : 256;
	qpp = fm10k_queues_per_pool(hw);
	vpp = fm10k_vectors_per_pool(hw);

	/* calculate starting index for queues */
	vf_q_idx = fm10k_vf_queue_index(hw, 0);
	qmap_idx = 0;

	/* establish TCs with -1 credits and no quanta to prevent transmit */
	for (i = 0; i < num_vfs; i++) {
		fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(i), 0);
		fm10k_write_reg(hw, FM10K_TC_RATE(i), 0);
		fm10k_write_reg(hw, FM10K_TC_CREDIT(i),
				FM10K_TC_CREDIT_CREDIT_MASK);
	}

	/* zero out all mbmem registers */
	for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
		fm10k_write_reg(hw, FM10K_MBMEM(i), 0);

	/* clear event notification of VF FLR */
	fm10k_write_reg(hw, FM10K_PFVFLREC(0), ~0);
	fm10k_write_reg(hw, FM10K_PFVFLREC(1), ~0);

	/* loop through unallocated rings assigning them back to PF */
	for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
		fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
		fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
				FM10K_TXQCTL_UNLIMITED_BW | vid);
		fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
	}

	/* PF should have already updated VFITR2[0] */

	/* update all ITR registers to flow to VFITR2[0] */
	for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
		if (!(i & (vpp - 1)))
			fm10k_write_reg(hw, FM10K_ITR2(i), i - vpp);
		else
			fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
	}

	/* update PF ITR2[0] to reference the last vector */
	fm10k_write_reg(hw, FM10K_ITR2(0),
			fm10k_vf_vector_index(hw, num_vfs - 1));

	/* loop through rings populating rings and TCs */
	for (i = 0; i < num_vfs; i++) {
		/* record index for VF queue 0 for use in end of loop */
		vf_q_idx0 = vf_q_idx;

		for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
			/* assign VF and locked TC to queues */
			fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
			fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx),
					(i << FM10K_TXQCTL_TC_SHIFT) | i |
					FM10K_TXQCTL_VF | vid);
			fm10k_write_reg(hw, FM10K_RXDCTL(vf_q_idx),
					FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
					FM10K_RXDCTL_DROP_ON_EMPTY);
			fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
					(i << FM10K_RXQCTL_VF_SHIFT) |
					FM10K_RXQCTL_VF);

			/* map queue pair to VF */
			fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
			fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
		}

		/* repeat the first ring for all of the remaining VF rings */
		for (; j < qmap_stride; j++, qmap_idx++) {
			fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
			fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
		}
	}

	/* loop through remaining indexes assigning all to queue 0 */
	while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), 0);
		qmap_idx++;
	}

	return 0;
}

/**
 * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
 * @hw: pointer to the HW structure
 * @vf_idx: index of VF receiving GLORT
 * @rate: Rate indicated in Mb/s
 *
 * Configures the TC for a given VF to allow only up to a given number
 * of Mb/s of outgoing Tx throughput.
 **/
static s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
{
	/* configure defaults */
	u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
	u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;

	/* verify vf is in range */
	if (vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* set interval to align with 4.096 usec in all modes */
	switch (hw->bus.speed) {
	case fm10k_bus_speed_2500:
		interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
		break;
	case fm10k_bus_speed_5000:
		interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
		break;
	default:
		break;
	}

	if (rate) {
		if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
			return FM10K_ERR_PARAM;

		/* The quanta is measured in Bytes per 4.096 or 8.192 usec
		 * The rate is provided in Mbits per second
		 * To translate from rate to quanta we need to multiply the
		 * rate by 8.192 usec and divide by 8 bits/byte. To avoid
		 * dealing with floating point we can round the values up
		 * to the nearest whole number ratio which gives us 128 / 125.
		 */
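		/* For example, a requested rate of 1000 Mb/s yields
		 * (1000 * 128) / 125 = 1024 quanta; since 1000 is below
		 * 4000 Mb/s, the interval below is doubled instead of
		 * halving the quanta.
		 */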
		tc_rate = (rate * 128) / 125;

		/* try to keep the rate limiting accurate by increasing
		 * the number of credits and interval for rates less than 4Gb/s
		 */
		if (rate < 4000)
			interval <<= 1;
		else
			tc_rate >>= 1;
	}

	/* update rate limiter with new values */
	fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
	fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
	fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);

	return 0;
}

/**
 * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
 * @hw: pointer to the HW structure
 * @vf_idx: index of VF receiving GLORT
 *
 * Update the interrupt moderator linked list to include any MSI-X
 * interrupts which the VF has enabled in the MSI-X vector table.
 **/
static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
{
	u16 vf_v_idx, vf_v_limit, i;

	/* verify vf is in range */
	if (vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* determine vector offset and count */
	vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
	vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);

	/* search for first vector that is not masked */
	for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
		if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
			break;
	}

	/* reset linked list so it now includes our active vectors */
	if (vf_idx == (hw->iov.num_vfs - 1))
		fm10k_write_reg(hw, FM10K_ITR2(0), i);
	else
		fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), i);

	return 0;
}

/**
 * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
 * @hw: pointer to the HW structure
 * @vf_info: pointer to VF information structure
 *
 * Assign a MAC address and default VLAN to a VF and notify it of the update
 **/
static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
						struct fm10k_vf_info *vf_info)
{
	u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
	u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
	s32 err = 0;
	u16 vf_idx, vf_vid;

	/* verify vf is in range */
	if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* determine qmap offsets and counts */
	qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
	queues_per_pool = fm10k_queues_per_pool(hw);

	/* calculate starting index for queues */
	vf_idx = vf_info->vf_idx;
	vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
	qmap_idx = qmap_stride * vf_idx;

	/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
	 * used here to indicate to the VF that it will not have privilege to
	 * write VLAN_TABLE. All policy is enforced on the PF but this allows
	 * the VF to correctly report errors for userspace requests.
	 */
	if (vf_info->pf_vid)
		vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
	else
		vf_vid = vf_info->sw_vid;

	/* generate MAC_ADDR request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
	fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
				    vf_info->mac, vf_vid);

	/* Configure Queue control register with new VLAN ID. The TXQCTL
	 * register is RO from the VF, so the PF must do this even in the
	 * case of notifying the VF of a new VID via the mailbox.
	 */
	txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
		 FM10K_TXQCTL_VID_MASK;
	txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
		  FM10K_TXQCTL_VF | vf_idx;

	for (i = 0; i < queues_per_pool; i++)
		fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);

	/* try loading a message onto outgoing mailbox first */
	if (vf_info->mbx.ops.enqueue_tx) {
		err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
		if (err != FM10K_MBX_ERR_NO_MBX)
			return err;
		err = 0;
	}

	/* If we aren't connected to a mailbox, this is most likely because
	 * the VF driver is not running. It should thus be safe to re-map
	 * queues and use the registers to pass the MAC address so that the VF
	 * driver gets correct information during its initialization.
	 */

	/* MAP Tx queue back to 0 temporarily, and disable it */
	fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
	fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
	/* verify ring has been disabled before modifying base address registers */
	txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
	for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
		/* limit ourselves to a 1ms timeout */
		if (timeout == 10) {
			err = FM10K_ERR_DMA_PENDING;
			goto err_out;
		}

		usleep_range(100, 200);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
	}

	/* Update base address registers to contain MAC address */
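	/* For example, MAC 02:aa:bb:cc:dd:ee is passed to the VF as
	 * TDBAL = 0xccddee00 and TDBAH = 0xff02aabb, which the VF driver
	 * decodes during its initialization.
	 */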
	if (is_valid_ether_addr(vf_info->mac)) {
		tdbal = (((u32)vf_info->mac[3]) << 24) |
			(((u32)vf_info->mac[4]) << 16) |
			(((u32)vf_info->mac[5]) << 8);

		tdbah = (((u32)0xFF) << 24) |
			(((u32)vf_info->mac[0]) << 16) |
			(((u32)vf_info->mac[1]) << 8) |
			((u32)vf_info->mac[2]);
	}

	/* Record the base address into queue 0 */
	fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal);
	fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah);

	/* Provide the VF the ITR scale, using software-defined fields in TDLEN
	 * to pass the information during VF initialization. See definition of
	 * FM10K_TDLEN_ITR_SCALE_SHIFT for more details.
	 */
	fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale <<
						   FM10K_TDLEN_ITR_SCALE_SHIFT);

err_out:
	/* restore the queue back to VF ownership */
	fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
	return err;
}

/**
 * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
 * @hw: pointer to the HW structure
 * @vf_info: pointer to VF information structure
 *
 * Reassign the interrupts and queues to a VF following an FLR
 **/
static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
					struct fm10k_vf_info *vf_info)
{
	u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
	u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
	u16 vf_v_idx, vf_v_limit, vf_vid;
	u8 vf_idx = vf_info->vf_idx;
	int i;

	/* verify vf is in range */
	if (vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* clear event notification of VF FLR */
	fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));

	/* force timeout and then disconnect the mailbox */
	vf_info->mbx.timeout = 0;
	if (vf_info->mbx.ops.disconnect)
		vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);

	/* determine vector offset and count */
	vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
	vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);

	/* determine qmap offsets and counts */
	qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
	queues_per_pool = fm10k_queues_per_pool(hw);
	qmap_idx = qmap_stride * vf_idx;

	/* make all the queues inaccessible to the VF */
	for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
		fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
		fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
	}

	/* calculate starting index for queues */
	vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);

	/* determine correct default VLAN ID */
	if (vf_info->pf_vid)
		vf_vid = vf_info->pf_vid;
	else
		vf_vid = vf_info->sw_vid;

	/* configure Queue control register */
	txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
		 (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
		 FM10K_TXQCTL_VF | vf_idx;
	rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;

	/* stop further DMA and reset queue ownership back to VF */
	for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
		fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
		fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);
		fm10k_write_reg(hw, FM10K_RXDCTL(i),
				FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
				FM10K_RXDCTL_DROP_ON_EMPTY);
		fm10k_write_reg(hw, FM10K_RXQCTL(i), rxqctl);
	}

	/* reset TC with -1 credits and no quanta to prevent transmit */
	fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
	fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), 0);
	fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx),
			FM10K_TC_CREDIT_CREDIT_MASK);

	/* update our first entry in the table based on previous VF */
	if (!vf_idx)
		hw->mac.ops.update_int_moderator(hw);
	else
		hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);

	/* reset linked list so it now includes our active vectors */
	if (vf_idx == (hw->iov.num_vfs - 1))
		fm10k_write_reg(hw, FM10K_ITR2(0), vf_v_idx);
	else
		fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);

	/* link remaining vectors so that next points to previous */
	for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
		fm10k_write_reg(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);

	/* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
	for (i = FM10K_VFMBMEM_LEN; i--;)
		fm10k_write_reg(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
	for (i = FM10K_VLAN_TABLE_SIZE; i--;)
		fm10k_write_reg(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
	for (i = FM10K_RETA_SIZE; i--;)
		fm10k_write_reg(hw, FM10K_RETA(vf_info->vsi, i), 0);
	for (i = FM10K_RSSRK_SIZE; i--;)
		fm10k_write_reg(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
	fm10k_write_reg(hw, FM10K_MRQC(vf_info->vsi), 0);

	/* Update base address registers to contain MAC address */
	if (is_valid_ether_addr(vf_info->mac)) {
		tdbal = (((u32)vf_info->mac[3]) << 24) |
			(((u32)vf_info->mac[4]) << 16) |
			(((u32)vf_info->mac[5]) << 8);
		tdbah = (((u32)0xFF) << 24) |
			(((u32)vf_info->mac[0]) << 16) |
			(((u32)vf_info->mac[1]) << 8) |
			((u32)vf_info->mac[2]);
	}

	/* map queue pairs back to VF from last to first */
	for (i = queues_per_pool; i--;) {
		fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
		fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
		/* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an
		 * explanation of how TDLEN is used.
		 */
		fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx + i),
				hw->mac.itr_scale <<
				FM10K_TDLEN_ITR_SCALE_SHIFT);
		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
	}

	/* repeat the first ring for all the remaining VF rings */
	for (i = queues_per_pool; i < qmap_stride; i++) {
		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
	}

	return 0;
}

/**
 * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
 * @hw: pointer to hardware structure
 * @vf_info: pointer to VF information structure
 * @lport_idx: Logical port offset from the hardware glort
 * @flags: Set of capability flags to extend port beyond basic functionality
 *
 * This function allows enabling a VF port by assigning it a GLORT and
 * setting the flags so that it can enable an Rx mode.
 **/
static s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
				  struct fm10k_vf_info *vf_info,
				  u16 lport_idx, u8 flags)
{
	u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
	vf_info->glort = glort;

	return 0;
}

/**
 * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
 * @hw: pointer to hardware structure
 * @vf_info: pointer to VF information structure
 *
 * This function disables a VF port by stripping it of a GLORT and
 * setting the flags so that it cannot enable any Rx mode.
 **/
static void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
				     struct fm10k_vf_info *vf_info)
{
	u32 msg[1];

	/* need to disable the port if it is already enabled */
	if (FM10K_VF_FLAG_ENABLED(vf_info)) {
		/* notify switch that this port has been disabled */
		fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);

		/* generate port state response to notify VF it is not ready */
		fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
		vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
	}

	/* clear flags and glort if it exists */
	vf_info->vf_flags = 0;
	vf_info->glort = 0;
}

/**
 * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
 * @hw: pointer to hardware structure
 * @q: stats for all queues of a VF
 * @vf_idx: index of VF
 *
 * This function collects queue stats for VFs.
 **/
static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
				      struct fm10k_hw_stats_q *q,
				      u16 vf_idx)
{
	u32 idx, qpp;

	/* get stats for all of the queues */
	qpp = fm10k_queues_per_pool(hw);
	idx = fm10k_vf_queue_index(hw, vf_idx);
	fm10k_update_hw_stats_q(hw, q, idx, qpp);
}

/**
 * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
 * @hw: Pointer to hardware structure
 * @results: Pointer array to message, results[0] is pointer to message
 * @mbx: Pointer to mailbox information structure
 *
 * This function is a default handler for MSI-X requests from the VF. The
 * assumption is that in this case it is acceptable to just directly
 * hand off the message from the VF to the underlying shared code.
 **/
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
			  struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	u8 vf_idx = vf_info->vf_idx;

	return hw->iov.ops.assign_int_moderator(hw, vf_idx);
}

/**
 * fm10k_iov_select_vid - Select correct default VLAN ID
 * @vf_info: pointer to the VF information structure
 * @vid: VLAN ID to correct
 *
 * Will report an error if the VLAN ID is out of range. For VID = 0, it will
 * return either the pf_vid or sw_vid depending on which one is set.
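 *
 * For example, if the PF has administratively set pf_vid to 100, a request
 * for VID 0 or VID 100 returns 100, while any other VID is rejected with
 * FM10K_ERR_PARAM.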
 */
static s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
{
	if (!vid)
		return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
	else if (vf_info->pf_vid && vid != vf_info->pf_vid)
		return FM10K_ERR_PARAM;
	else
		return vid;
}

/**
 * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
 * @hw: Pointer to hardware structure
 * @results: Pointer array to message, results[0] is pointer to message
 * @mbx: Pointer to mailbox information structure
 *
 * This function is a default handler for MAC/VLAN requests from the VF.
 * The assumption is that in this case it is acceptable to just directly
 * hand off the message from the VF to the underlying shared code.
 **/
s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
			      struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	u8 mac[ETH_ALEN];
	u32 *result;
	int err = 0;
	bool set;
	u16 vlan;
	u32 vid;

	/* we shouldn't be updating rules on a disabled interface */
	if (!FM10K_VF_FLAG_ENABLED(vf_info))
		err = FM10K_ERR_PARAM;

	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
		result = results[FM10K_MAC_VLAN_MSG_VLAN];

		/* record VLAN id requested */
		err = fm10k_tlv_attr_get_u32(result, &vid);
		if (err)
			return err;

		set = !(vid & FM10K_VLAN_CLEAR);
		vid &= ~FM10K_VLAN_CLEAR;

		/* if the length field has been set, this is a multi-bit
		 * update request. For multi-bit requests, simply disallow
		 * them when the pf_vid has been set. In this case, the PF
		 * should have already cleared the VLAN_TABLE, and if we
		 * allowed them, it could allow a rogue VF to receive traffic
		 * on a VLAN it was not assigned. In the single-bit case, we
		 * need to modify requests for VLAN 0 to use the default PF or
		 * SW vid when assigned.
		 */
		if (vid >> 16) {
			/* prevent multi-bit requests when PF has
			 * administratively set the VLAN for this VF
			 */
			if (vf_info->pf_vid)
				return FM10K_ERR_PARAM;
		} else {
			err = fm10k_iov_select_vid(vf_info, (u16)vid);
			if (err < 0)
				return err;

			vid = err;
		}

		/* update VSI info for VF in regards to VLAN table */
		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
		result = results[FM10K_MAC_VLAN_MSG_MAC];

		/* record unicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* block attempts to set MAC for a locked device */
		if (is_valid_ether_addr(vf_info->mac) &&
		    !ether_addr_equal(mac, vf_info->mac))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* notify switch of request for new unicast address */
		err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
						 mac, vlan, set, 0);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];

		/* record multicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* verify that the VF is allowed to request multicast */
		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* notify switch of request for new multicast address */
		err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
						 mac, vlan, set);
	}

	return err;
}

/**
 * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
 * @vf_info: VF info structure containing capability flags
 * @mode: Requested xcast mode
 *
 * This function outputs the mode that most closely matches the requested
 * mode. If no modes match, it will request that we disable the port.
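 * For example, a VF that is multicast-capable but not promiscuous-capable
 * requesting PROMISC is demoted to the highest mode it does support.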
 **/
static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
					    u8 mode)
{
	u8 vf_flags = vf_info->vf_flags;

	/* match up mode to capabilities as best as possible */
	switch (mode) {
	case FM10K_XCAST_MODE_PROMISC:
		if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
			return FM10K_XCAST_MODE_PROMISC;
		/* fall through */
	case FM10K_XCAST_MODE_ALLMULTI:
		if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
			return FM10K_XCAST_MODE_ALLMULTI;
		/* fall through */
	case FM10K_XCAST_MODE_MULTI:
		if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
			return FM10K_XCAST_MODE_MULTI;
		/* fall through */
	case FM10K_XCAST_MODE_NONE:
		if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
			return FM10K_XCAST_MODE_NONE;
		/* fall through */
	default:
		break;
	}

	/* disable interface as it should not be able to request any */
	return FM10K_XCAST_MODE_DISABLE;
}
  1143. /**
  1144. * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
  1145. * @hw: Pointer to hardware structure
  1146. * @results: Pointer array to message, results[0] is pointer to message
  1147. * @mbx: Pointer to mailbox information structure
  1148. *
  1149. * This function is a default handler for port state requests. The port
  1150. * state requests for now are basic and consist of enabling or disabling
  1151. * the port.
  1152. **/
  1153. s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
  1154. struct fm10k_mbx_info *mbx)
  1155. {
  1156. struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
  1157. u32 *result;
  1158. s32 err = 0;
  1159. u32 msg[2];
  1160. u8 mode = 0;
  1161. /* verify VF is allowed to enable even minimal mode */
  1162. if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
  1163. return FM10K_ERR_PARAM;
	if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
		result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];

		/* XCAST mode update requested */
		err = fm10k_tlv_attr_get_u8(result, &mode);
		if (err)
			return FM10K_ERR_PARAM;

		/* prep for possible demotion depending on capabilities */
		mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);

		/* if mode is not currently enabled, enable it */
		if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
			fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);

		/* swap mode back to a bit flag */
		mode = FM10K_VF_FLAG_SET_MODE(mode);
	} else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
		/* need to disable the port if it is already enabled */
		if (FM10K_VF_FLAG_ENABLED(vf_info))
			err = fm10k_update_lport_state_pf(hw, vf_info->glort,
							  1, false);

		/* we need to clear VF_FLAG_ENABLED flags in order to ensure
		 * that we actually re-enable the LPORT state below. Note that
		 * this has no impact if the VF is already disabled, as the
		 * flags are already cleared.
		 */
		if (!err)
			vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);

		/* when enabling the port we should reset the rate limiters */
		hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);

		/* set mode for minimal functionality */
		mode = FM10K_VF_FLAG_SET_MODE_NONE;

		/* generate port state response to notify VF it is ready */
		fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
		fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
		mbx->ops.enqueue_tx(hw, mbx, msg);
	}
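
	/* Both operands in the comparison below are normalized with '!' so
	 * the test acts as a logical XOR between "port currently enabled"
	 * and "a mode was requested"; the LPORT state is only rewritten when
	 * the two disagree.
	 */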
	/* if enable state toggled note the update */
	if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
		err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
						  !!mode);

	/* if state change succeeded, then update our stored state */
	mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
	if (!err)
		vf_info->vf_flags = mode;

	return err;
}
/**
 * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
 * @hw: pointer to hardware structure
 * @stats: pointer to the stats structure to update
 *
 * This function collects and aggregates global and per queue hardware
 * statistics.
 **/
static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
				     struct fm10k_hw_stats *stats)
{
	u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
	u32 id, id_prev;

	/* Use Tx queue 0 as a canary to detect a reset */
	id = fm10k_read_reg(hw, FM10K_TXQCTL(0));
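
	/* The ID field of TXQCTL(0) changes when queue ownership changes,
	 * as happens across a reset. Re-reading it after the counter reads
	 * and looping until it is stable gives a snapshot taken entirely
	 * under a single owner.
	 */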
	/* Read Global Statistics */
	do {
		timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
						  &stats->timeout);
		ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
		ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
		um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
		xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
		vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
						    &stats->vlan_drop);
		loopback_drop =
			fm10k_read_hw_stats_32b(hw,
						FM10K_STATS_LOOPBACK_DROP,
						&stats->loopback_drop);
		nodesc_drop = fm10k_read_hw_stats_32b(hw,
						      FM10K_STATS_NODESC_DROP,
						      &stats->nodesc_drop);

		/* if value has not changed then we have consistent data */
		id_prev = id;
		id = fm10k_read_reg(hw, FM10K_TXQCTL(0));
	} while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);

	/* drop non-ID bits and set VALID ID bit */
	id &= FM10K_TXQCTL_ID_MASK;
	id |= FM10K_STAT_VALID;
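
	/* Counters are only accumulated when the recorded owner ID matches
	 * the one just read; otherwise the deltas are discarded and only the
	 * bases are re-latched below, so counts gathered under a previous
	 * owner never leak into the totals.
	 */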
	/* Update Global Statistics */
	if (stats->stats_idx == id) {
		stats->timeout.count += timeout;
		stats->ur.count += ur;
		stats->ca.count += ca;
		stats->um.count += um;
		stats->xec.count += xec;
		stats->vlan_drop.count += vlan_drop;
		stats->loopback_drop.count += loopback_drop;
		stats->nodesc_drop.count += nodesc_drop;
	}

	/* Update bases and record current PF id */
	fm10k_update_hw_base_32b(&stats->timeout, timeout);
	fm10k_update_hw_base_32b(&stats->ur, ur);
	fm10k_update_hw_base_32b(&stats->ca, ca);
	fm10k_update_hw_base_32b(&stats->um, um);
	fm10k_update_hw_base_32b(&stats->xec, xec);
	fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
	fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
	fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
	stats->stats_idx = id;

	/* Update Queue Statistics */
	fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
}
/**
 * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
 * @hw: pointer to hardware structure
 * @stats: pointer to the stats structure to update
 *
 * This function resets the base for global and per queue hardware
 * statistics.
 **/
static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
				     struct fm10k_hw_stats *stats)
{
	/* Unbind Global Statistics */
	fm10k_unbind_hw_stats_32b(&stats->timeout);
	fm10k_unbind_hw_stats_32b(&stats->ur);
	fm10k_unbind_hw_stats_32b(&stats->ca);
	fm10k_unbind_hw_stats_32b(&stats->um);
	fm10k_unbind_hw_stats_32b(&stats->xec);
	fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
	fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
	fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);

	/* Unbind Queue Statistics */
	fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);

	/* Reinitialize bases for all stats */
	fm10k_update_hw_stats_pf(hw, stats);
}
/**
 * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
 * @hw: pointer to hardware structure
 * @dma_mask: 64 bit DMA mask required for platform
 *
 * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
 * to limit the access to memory beyond what is physically in the system.
 **/
static void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
{
	/* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
	u32 phyaddr = (u32)(dma_mask >> 32);
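	/* e.g. DMA_BIT_MASK(48) == 0x0000ffffffffffff, so phyaddr is
	 * programmed with 0x0000ffff
	 */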

	fm10k_write_reg(hw, FM10K_PHYADDR, phyaddr);
}
/**
 * fm10k_get_fault_pf - Record a fault in one of the interface units
 * @hw: pointer to hardware structure
 * @type: pointer to fault type register offset
 * @fault: pointer to memory location to record the fault
 *
 * Record the fault register contents to the fault data structure and
 * clear the entry from the register.
 *
 * Returns ERR_PARAM if invalid register is specified or no error is present.
 **/
static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
			      struct fm10k_fault *fault)
{
	u32 func;

	/* verify the fault register is in range and is aligned */
	switch (type) {
	case FM10K_PCA_FAULT:
	case FM10K_THI_FAULT:
	case FM10K_FUM_FAULT:
		break;
	default:
		return FM10K_ERR_PARAM;
	}

	/* only service faults that are valid */
	func = fm10k_read_reg(hw, type + FM10K_FAULT_FUNC);
	if (!(func & FM10K_FAULT_FUNC_VALID))
		return FM10K_ERR_PARAM;

	/* read remaining fields */
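	/* the 64-bit fault address is split across two registers: ADDR_HI
	 * supplies bits 63:32 and ADDR_LO bits 31:0
	 */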
	fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI);
	fault->address <<= 32;
	fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
	fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO);

	/* clear valid bit to allow for next error */
	fm10k_write_reg(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);

	/* Record which function triggered the error */
	if (func & FM10K_FAULT_FUNC_PF)
		fault->func = 0;
	else
		fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
				   FM10K_FAULT_FUNC_VF_SHIFT);

	/* record fault type */
	fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;

	return 0;
}
/**
 * fm10k_request_lport_map_pf - Request LPORT map from the switch API
 * @hw: pointer to hardware structure
 *
 * This function posts a message to the switch manager mailbox requesting
 * the current LPORT map; the reply is handled by fm10k_msg_lport_map_pf.
 **/
static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[1];

	/* issue request asking for LPORT map */
	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}
/**
 * fm10k_get_host_state_pf - Returns the state of the switch and mailbox
 * @hw: pointer to hardware structure
 * @switch_ready: pointer to boolean value that will record switch state
 *
 * This function will check the DMA_CTRL2 register and mailbox in order
 * to determine if the switch is ready for the PF to begin requesting
 * addresses and mapping traffic to the local interface.
 **/
static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
{
	u32 dma_ctrl2;
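
	/* a switch that is not yet ready is not an error: the early return
	 * below reports success and leaves *switch_ready untouched until
	 * the switch comes up
	 */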
	/* verify the switch is ready for interaction */
	dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
	if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
		return 0;

	/* retrieve generic host state info */
	return fm10k_get_host_state_generic(hw, switch_ready);
}
/* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
				 sizeof(struct fm10k_swapi_error)),
	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
	FM10K_TLV_ATTR_LAST
};
/**
 * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler configures the lport mapping based on the reply from the
 * switch API.
 **/
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	u16 glort, mask;
	u32 dglort_map;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
				     &dglort_map);
	if (err)
		return err;

	/* extract values out of the header */
	glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
	mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);

	/* verify mask is set and none of the masked bits in glort are set */
	if (!mask || (glort & ~mask))
		return FM10K_ERR_PARAM;

	/* verify the mask is contiguous, and that it is 1's followed by 0's */
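	/* (~(mask - 1) & mask) isolates the lowest set bit of the mask, and
	 * adding that back to the mask only carries past bit 15 when the
	 * mask is a contiguous run of 1's in the upper bits of the glort
	 * field. For example, 0xffc0 gives 0x0040 + 0xffc0 = 0x10000 and is
	 * accepted, while 0xff88 gives 0x0008 + 0xff88 = 0xff90 and is
	 * rejected by the FM10K_DGLORTMAP_NONE test below.
	 */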
	if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
		return FM10K_ERR_PARAM;

	/* record the glort, mask, and port count */
	hw->mac.dglort_map = dglort_map;

	return 0;
}
const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
	FM10K_TLV_ATTR_LAST
};
/**
 * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler configures the default VLAN for the PF
 **/
static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
				    struct fm10k_mbx_info *mbx)
{
	u16 glort, pvid;
	u32 pvid_update;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
				     &pvid_update);
	if (err)
		return err;

	/* extract values from the pvid update */
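	/* both the glort and the VLAN ID are packed into the single 32-bit
	 * attribute; FM10K_MSG_HDR_FIELD_GET pulls each out using the
	 * corresponding UPDATE_PVID_* shift/size definitions
	 */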
	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* verify VLAN ID is valid */
	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* record the port VLAN ID value */
	hw->mac.default_vid = pvid;

	return 0;
}
/**
 * fm10k_record_global_table_data - Move global table data to swapi table info
 * @from: pointer to source table data structure
 * @to: pointer to destination table info structure
 *
 * This function will copy table_data to the table_info contained in
 * the hw struct.
 **/
static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
					   struct fm10k_swapi_table_info *to)
{
	/* convert from le32 struct to CPU byte ordered values */
	to->used = le32_to_cpu(from->used);
	to->avail = le32_to_cpu(from->avail);
}
const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
				 sizeof(struct fm10k_swapi_error)),
	FM10K_TLV_ATTR_LAST
};
/**
 * fm10k_msg_err_pf - Message handler for error reply
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler will capture the data for any error replies to previous
 * messages that the PF has sent.
 **/
s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
		     struct fm10k_mbx_info *mbx)
{
	struct fm10k_swapi_error err_msg;
	s32 err;

	/* extract structure from message */
	err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
					   &err_msg, sizeof(err_msg));
	if (err)
		return err;

	/* record table status */
	fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
	fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
	fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);

	/* record SW API status value */
	hw->swapi.status = le32_to_cpu(err_msg.status);

	return 0;
}
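
/* Table routing inbound switch manager messages to their handlers; this is
 * registered with the SM mailbox in fm10k_get_invariants_pf below, and
 * messages with no matching entry end up at the terminating TLV error
 * handler.
 */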
static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
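
/* Function pointer tables binding the device-independent fm10k core to the
 * PF-specific MAC and SR-IOV implementations defined in this file.
 */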
static const struct fm10k_mac_ops mac_ops_pf = {
	.get_bus_info		= fm10k_get_bus_info_generic,
	.reset_hw		= fm10k_reset_hw_pf,
	.init_hw		= fm10k_init_hw_pf,
	.start_hw		= fm10k_start_hw_generic,
	.stop_hw		= fm10k_stop_hw_generic,
	.update_vlan		= fm10k_update_vlan_pf,
	.read_mac_addr		= fm10k_read_mac_addr_pf,
	.update_uc_addr		= fm10k_update_uc_addr_pf,
	.update_mc_addr		= fm10k_update_mc_addr_pf,
	.update_xcast_mode	= fm10k_update_xcast_mode_pf,
	.update_int_moderator	= fm10k_update_int_moderator_pf,
	.update_lport_state	= fm10k_update_lport_state_pf,
	.update_hw_stats	= fm10k_update_hw_stats_pf,
	.rebind_hw_stats	= fm10k_rebind_hw_stats_pf,
	.configure_dglort_map	= fm10k_configure_dglort_map_pf,
	.set_dma_mask		= fm10k_set_dma_mask_pf,
	.get_fault		= fm10k_get_fault_pf,
	.get_host_state		= fm10k_get_host_state_pf,
	.request_lport_map	= fm10k_request_lport_map_pf,
};
static const struct fm10k_iov_ops iov_ops_pf = {
	.assign_resources		= fm10k_iov_assign_resources_pf,
	.configure_tc			= fm10k_iov_configure_tc_pf,
	.assign_int_moderator		= fm10k_iov_assign_int_moderator_pf,
	.assign_default_mac_vlan	= fm10k_iov_assign_default_mac_vlan_pf,
	.reset_resources		= fm10k_iov_reset_resources_pf,
	.set_lport			= fm10k_iov_set_lport_pf,
	.reset_lport			= fm10k_iov_reset_lport_pf,
	.update_stats			= fm10k_iov_update_stats_pf,
};
static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
{
	fm10k_get_invariants_generic(hw);

	return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
}

const struct fm10k_info fm10k_pf_info = {
	.mac		= fm10k_mac_pf,
	.get_invariants	= fm10k_get_invariants_pf,
	.mac_ops	= &mac_ops_pf,
	.iov_ops	= &iov_ops_pf,
};