// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_switch.h"

#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter first two bytes defines ether type (0x8100)
 *	and remaining two bytes are placeholder for programming a given VLAN id
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
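/* Note: the ICE_SW_RULE_*_SIZE macros above compute the number of bytes to
 * allocate for one ice_aqc_sw_rules_elem whose pdata payload carries a
 * variable-length tail (the dummy Ethernet header, 'n' large actions, or
 * 'n' VSI list entries). The subtracted sizeof() terms (and the trailing
 * "- 1" on the RX/TX variants) appear to back out placeholder members that
 * are already counted in the base structure sizes, so the final value is
 * just header plus the real payload length.
 */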

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the hw struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
static enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the hw struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
enum ice_status
ice_init_def_sw_recp(struct ice_hw *hw)
{
	struct ice_sw_recipe *recps;
	u8 i;

	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(struct ice_sw_recipe), GFP_KERNEL);
	if (!recps)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		mutex_init(&recps[i].filt_rule_lock);
	}

	hw->switch_info->recp_list = recps;

	return 0;
}

/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in 'buf'.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is an output-only parameter. It reflects the number of elements
 * in the response buffer. The caller should use *num_elems while parsing the
 * response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}

/**
 * ice_aq_add_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);

	cmd->vf_id = vsi_ctx->vf_num;
	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}

/**
 * ice_aq_free_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
static enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_aq_update_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
static enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_get_hw_vsi_num - return the hw VSI number
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 *
 * return the hw VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
}

/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
			     struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}

/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (vsi) {
		devm_kfree(ice_hw_to_dev(hw), vsi);
		hw->vsi_ctx[vsi_handle] = NULL;
	}
}

/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the hw struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
}

/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the hw struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware and also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs, then update
 * the corresponding VSI handle list entry with the new HW VSI number.
 */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;

	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;

	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;

	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new vsi context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
			tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return status;
}

/**
 * ice_free_vsi - free VSI context from hardware and VSI handle list
 * @hw: pointer to the hw struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
enum ice_status
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	if (!status)
		ice_clear_vsi_ctx(hw, vsi_handle);
	return status;
}

/**
 * ice_update_vsi
 * @hw: pointer to the hw struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
enum ice_status
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}

/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the hw struct
 * @vsi_list_id: VSI list id returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Allocates or frees a VSI list resource
 */
static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	enum ice_status status;
	u16 buf_len;

	buf_len = sizeof(*sw_buf);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = cpu_to_le16(1);

	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = ICE_ERR_PARAM;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the hw struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
static enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
}

/**
 * ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
		break;
	}
}

/**
 * ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	u16 req_desc = 0;
	u16 num_elems;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);
	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0; i < num_elems; i++) {
			struct ice_aqc_get_sw_cfg_resp_elem *ele;
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 type;

			ele = rbuf[i].elements;
			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			type = le16_to_cpu(ele->vsi_port_num) >>
				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;

			if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
	return status;
}

/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @f_info: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	f_info->lb_en = false;
	f_info->lan_en = false;
	if ((f_info->flag & ICE_FLTR_TX) &&
	    (f_info->fltr_act == ICE_FWD_TO_VSI ||
	     f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     f_info->fltr_act == ICE_FWD_TO_Q ||
	     f_info->fltr_act == ICE_FWD_TO_QGRP)) {
		f_info->lb_en = true;
		if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
		      is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
			f_info->lan_en = true;
	}
}

/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* vlan_id still holds the "invalid" sentinel (ICE_MAX_VLAN_ID + 1)
	 * unless a VLAN-based lookup set it above, so only program the VLAN
	 * TCI field when a valid VLAN id was supplied.
	 */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
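/* The resulting 16-byte dummy header laid out by ice_fill_sw_rule(), by byte
 * offset (see the dummy_eth_header comment at the top of this file):
 *   0..5   destination MAC, overwritten when the lookup carries a MAC address
 *   6..11  source MAC placeholder
 *   12..13 Ethertype, or 0x8100 when a VLAN tag follows
 *   14..15 VLAN TCI, programmed only for VLAN-based lookups
 * The forwarding decision itself lives in the rule's recipe_id, src and act
 * fields rather than in the header bytes.
 */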

/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource id
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile id
	 * 3. GENERIC VALUE action to hold the software marker id
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up tx rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup tx rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action id */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule id of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}

/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list id generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list id to VSI mapping
 * using the given VSI list id
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		set_bit(vsi_handle_arr[i], v_map->vsi_map);

	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}

/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list id generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list id
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
				ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
				ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = cpu_to_le16(type);
	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the hw struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: stores the ID of the VSI list to be created
 * @lkup_type: switch rule filter's lookup type
 */
static enum ice_status
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
	enum ice_status status;

	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
					    ice_aqc_opc_alloc_res);
	if (status)
		return status;

	/* Update the newly created VSI list to include the specified VSIs */
	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
					*vsi_list_id, false,
					ice_aqc_opc_add_sw_rules, lkup_type);
}

/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_info: filter information for switch rule
 *
 * Call AQ command to update a previously created switch rule with a
 * VSI list id
 */
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);

	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);

	/* Update switch rule with new rule set to forward VSI list */
	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_update_sw_rules, NULL);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the hw struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information.
 * The algorithm to do the book keeping is described below:
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list id
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = 0;
	u16 vsi_list_id = 0;

	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI id passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI id */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}

/**
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
			ret = list_itr;
			break;
		}
	}
	return ret;
}

/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists need to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list id found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
			map_info = list_itr->vsi_list_info;
			if (test_bit(vsi_handle, map_info->vsi_map)) {
				*vsi_list_id = map_info->vsi_list_id;
				return map_info;
			}
		}
	}
	return NULL;
}

/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe id) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list id generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
 * VSI list.
 */
static enum ice_status
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
	 */
	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					    ice_aqc_opc_free_res);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;

	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe id for which the rule needs to be removed
 * @f_entry: rule entry containing filter information
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else {
		if (list_elem->vsi_list_info->ref_cnt > 1)
			list_elem->vsi_list_info->ref_cnt--;
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Free the rule buffer whether or not the AQ call succeeded
		 * so it is not leaked on the error path.
		 */
		devm_kfree(ice_hw_to_dev(hw), s_rule);
		if (status)
			goto exit;

		/* Remove the bookkeeping entry from the list */
		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case; removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
enum ice_status
ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 elem_sent, total_elem_left;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u16 s_rule_size;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;

	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is a VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule id based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The bookkeeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */
			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
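
/* Illustrative only (not part of the driver): a minimal sketch of how a
 * caller might build an m_list for ice_add_mac(). The field choices mirror
 * the checks ice_add_mac() performs above (ICE_SW_LKUP_MAC lookup,
 * ICE_SRC_ID_VSI source id, forward-to-VSI action); the MAC value and
 * vsi_handle are placeholders.
 *
 *	LIST_HEAD(m_list);
 *	struct ice_fltr_list_entry *entry;
 *	const u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
 *	if (!entry)
 *		return ICE_ERR_NO_MEMORY;
 *	entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry->fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry->fltr_info.vsi_handle = vsi_handle;
 *	ether_addr_copy(entry->fltr_info.l_data.mac.mac_addr, mac);
 *	list_add(&entry->list_entry, &m_list);
 *
 *	status = ice_add_mac(hw, &m_list);
 */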

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN id should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing the VSI that
			 * we want to add. If found, use the same vsi_list_id
			 * for this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI id only if it is
		 * used by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If a VLAN rule exists and the VSI list used by this rule is
		 * referenced by more than one VLAN rule, then create a new VSI
		 * list by appending the new VSI to the previous one, and
		 * update the existing VLAN rule to point to the new VSI list
		 * id.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing a VSI list with
		 * one VSI count. We should never hit the condition below.
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW,
				  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes the current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info, decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}
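
/* Illustrative only (not part of the driver): a minimal sketch of a caller
 * building a one-entry v_list for ice_add_vlan(). The VLAN id and vsi_handle
 * are placeholders; the lookup type, action and source id mirror what
 * ice_add_vlan()/ice_add_vlan_internal() expect above.
 *
 *	LIST_HEAD(v_list);
 *	struct ice_fltr_list_entry *entry;
 *
 *	entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
 *	if (!entry)
 *		return ICE_ERR_NO_MEMORY;
 *	entry->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
 *	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry->fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry->fltr_info.vsi_handle = vsi_handle;
 *	entry->fltr_info.l_data.vlan.vlan_id = 100;
 *	list_add(&entry->list_entry, &v_list);
 *
 *	status = ice_add_vlan(hw, &v_list);
 */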

/**
 * ice_rem_sw_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 */
static void
ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	if (!list_empty(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
			list_del(&entry->list_entry);
			devm_kfree(ice_hw_to_dev(hw), entry);
		}
	}
}

/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add a filter rule to set/unset the given VSI as the default VSI for the
 * switch (represented by swid)
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	if (set) {
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
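
/* Illustrative only (not part of the driver): a caller would typically set a
 * VSI as the default Rx VSI and later clear it with something like:
 *
 *	status = ice_cfg_dflt_vsi(hw, vsi_handle, true, ICE_FLTR_RX);
 *	...
 *	status = ice_cfg_dflt_vsi(hw, vsi_handle, false, ICE_FLTR_RX);
 */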

/**
 * ice_remove_mac - remove a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
 */
enum ice_status
ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_fltr_list_entry *list_itr, *tmp;

	if (!m_list)
		return ICE_ERR_PARAM;

	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;
		list_itr->status = ice_remove_rule_internal(hw,
							    ICE_SW_LKUP_MAC,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return 0;
}
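
/* Illustrative only (not part of the driver): removal mirrors addition, so a
 * caller would typically reuse the same ice_fltr_list_entry contents it
 * passed to ice_add_mac() (m_list below is a placeholder for that list):
 *
 *	status = ice_remove_mac(hw, &m_list);
 *	if (status)
 *		ice_debug(hw, ICE_DBG_SW, "MAC filter removal failed\n");
 */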

/**
 * ice_remove_vlan - Remove VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->status = ice_remove_rule_internal(hw,
							      ICE_SW_LKUP_VLAN,
							      v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
 * @fm_entry: filter entry to inspect
 * @vsi_handle: VSI handle to compare with filter info
 */
static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
{
	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
}

/**
 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @vsi_list_head: pointer to the list to add entry to
 * @fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the fltr from, and pass on that information.
 */
static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct list_head *vsi_list_head,
			       struct ice_fltr_info *fi)
{
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_add(&tmp->list_entry, vsi_list_head);

	return 0;
}

/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with the list.
 */
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct list_head *lkup_list_head,
			 struct list_head *vsi_list_head)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;

	/* check to make sure VSI id is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
		struct ice_fltr_info *fi;

		fi = &fm_entry->fltr_info;
		if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							vsi_list_head, fi);
		if (status)
			return status;
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	/* On failure, fall through to free any entries already copied so they
	 * are not leaked on the error path.
	 */
	if (status)
		goto free_fltr_list;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_PROMISC_VLAN:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}

/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}
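
/* Illustrative only (not part of the driver): when a VSI is being torn down,
 * a caller would typically flush every lookup type in a single call, e.g.:
 *
 *	ice_remove_vsi_fltr(hw, vsi_handle);
 *
 * where vsi_handle is assumed to be the driver VSI handle used when the
 * filters were added.
 */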

/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe id for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass a valid VSI handle.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = 0;
	u16 hw_vsi_id;

	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is a VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;
		/* Clearing it so that the logic can add it back */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is a VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}

/**
 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 *
 * Replays filters for requested VSI via vsi_handle.
 */
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct list_head *head;

		head = &sw->recp_list[i].filt_replay_rules;
		status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
		if (status)
			return status;
	}
	return status;
}
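
/* Illustrative only (not part of the driver): a minimal sketch of a reset
 * path replaying the bookkept filters for every VSI handle that is still
 * valid; the loop bound and helpers shown are the ones already used in this
 * file.
 *
 *	u16 i;
 *
 *	for (i = 0; i < ICE_MAX_VSI; i++) {
 *		if (!ice_is_vsi_valid(hw, i))
 *			continue;
 *		status = ice_replay_vsi_all_fltr(hw, i);
 *		if (status)
 *			break;
 *	}
 */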

/**
 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
 * @hw: pointer to the hw struct
 *
 * Deletes the filter replay rules.
 */
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	if (!sw)
		return;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
			struct list_head *l_head;

			l_head = &sw->recp_list[i].filt_replay_rules;
			ice_rem_sw_rule_info(hw, l_head);
		}
	}
}