/* ice_switch.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. #include "ice_switch.h"
/* Byte offsets of fields within the dummy ethernet header below */
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
/* Largest valid 12-bit VLAN id */
#define ICE_MAX_VLAN_ID 0xFFF

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN id
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* Size of a lookup rule AQ element that carries the dummy ethernet header
 * above as packet data.  The "- 1" accounts for the one-byte flexible data
 * member already counted inside the lkup_rx_tx struct.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Size of a lookup rule AQ element with no packet data attached */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action AQ element holding n 32-bit action entries */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list AQ element holding n VSI number entries */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
  47. /**
  48. * ice_aq_alloc_free_res - command to allocate/free resources
  49. * @hw: pointer to the hw struct
  50. * @num_entries: number of resource entries in buffer
  51. * @buf: Indirect buffer to hold data parameters and response
  52. * @buf_size: size of buffer for indirect commands
  53. * @opc: pass in the command opcode
  54. * @cd: pointer to command details structure or NULL
  55. *
  56. * Helper function to allocate/free resources using the admin queue commands
  57. */
  58. static enum ice_status
  59. ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
  60. struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
  61. enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  62. {
  63. struct ice_aqc_alloc_free_res_cmd *cmd;
  64. struct ice_aq_desc desc;
  65. cmd = &desc.params.sw_res_ctrl;
  66. if (!buf)
  67. return ICE_ERR_PARAM;
  68. if (buf_size < (num_entries * sizeof(buf->elem[0])))
  69. return ICE_ERR_PARAM;
  70. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  71. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  72. cmd->num_entries = cpu_to_le16(num_entries);
  73. return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  74. }
  75. /**
  76. * ice_init_def_sw_recp - initialize the recipe book keeping tables
  77. * @hw: pointer to the hw struct
  78. *
  79. * Allocate memory for the entire recipe table and initialize the structures/
  80. * entries corresponding to basic recipes.
  81. */
  82. enum ice_status
  83. ice_init_def_sw_recp(struct ice_hw *hw)
  84. {
  85. struct ice_sw_recipe *recps;
  86. u8 i;
  87. recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
  88. sizeof(struct ice_sw_recipe), GFP_KERNEL);
  89. if (!recps)
  90. return ICE_ERR_NO_MEMORY;
  91. for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
  92. recps[i].root_rid = i;
  93. INIT_LIST_HEAD(&recps[i].filt_rules);
  94. INIT_LIST_HEAD(&recps[i].filt_replay_rules);
  95. mutex_init(&recps[i].filt_rule_lock);
  96. }
  97. hw->switch_info->recp_list = recps;
  98. return 0;
  99. }
  100. /**
  101. * ice_aq_get_sw_cfg - get switch configuration
  102. * @hw: pointer to the hardware structure
  103. * @buf: pointer to the result buffer
  104. * @buf_size: length of the buffer available for response
  105. * @req_desc: pointer to requested descriptor
  106. * @num_elems: pointer to number of elements
  107. * @cd: pointer to command details structure or NULL
  108. *
  109. * Get switch configuration (0x0200) to be placed in 'buff'.
  110. * This admin command returns information such as initial VSI/port number
  111. * and switch ID it belongs to.
  112. *
  113. * NOTE: *req_desc is both an input/output parameter.
  114. * The caller of this function first calls this function with *request_desc set
  115. * to 0. If the response from f/w has *req_desc set to 0, all the switch
  116. * configuration information has been returned; if non-zero (meaning not all
  117. * the information was returned), the caller should call this function again
  118. * with *req_desc set to the previous value returned by f/w to get the
  119. * next block of switch configuration information.
  120. *
  121. * *num_elems is output only parameter. This reflects the number of elements
  122. * in response buffer. The caller of this function to use *num_elems while
  123. * parsing the response buffer.
  124. */
  125. static enum ice_status
  126. ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
  127. u16 buf_size, u16 *req_desc, u16 *num_elems,
  128. struct ice_sq_cd *cd)
  129. {
  130. struct ice_aqc_get_sw_cfg *cmd;
  131. enum ice_status status;
  132. struct ice_aq_desc desc;
  133. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
  134. cmd = &desc.params.get_sw_conf;
  135. cmd->element = cpu_to_le16(*req_desc);
  136. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  137. if (!status) {
  138. *req_desc = le16_to_cpu(cmd->element);
  139. *num_elems = le16_to_cpu(cmd->num_elems);
  140. }
  141. return status;
  142. }
  143. /**
  144. * ice_aq_add_vsi
  145. * @hw: pointer to the hw struct
  146. * @vsi_ctx: pointer to a VSI context struct
  147. * @cd: pointer to command details structure or NULL
  148. *
  149. * Add a VSI context to the hardware (0x0210)
  150. */
  151. static enum ice_status
  152. ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  153. struct ice_sq_cd *cd)
  154. {
  155. struct ice_aqc_add_update_free_vsi_resp *res;
  156. struct ice_aqc_add_get_update_free_vsi *cmd;
  157. struct ice_aq_desc desc;
  158. enum ice_status status;
  159. cmd = &desc.params.vsi_cmd;
  160. res = &desc.params.add_update_free_vsi_res;
  161. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
  162. if (!vsi_ctx->alloc_from_pool)
  163. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
  164. ICE_AQ_VSI_IS_VALID);
  165. cmd->vf_id = vsi_ctx->vf_num;
  166. cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
  167. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  168. status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
  169. sizeof(vsi_ctx->info), cd);
  170. if (!status) {
  171. vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
  172. vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
  173. vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
  174. }
  175. return status;
  176. }
  177. /**
  178. * ice_aq_free_vsi
  179. * @hw: pointer to the hw struct
  180. * @vsi_ctx: pointer to a VSI context struct
  181. * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
  182. * @cd: pointer to command details structure or NULL
  183. *
  184. * Free VSI context info from hardware (0x0213)
  185. */
  186. static enum ice_status
  187. ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  188. bool keep_vsi_alloc, struct ice_sq_cd *cd)
  189. {
  190. struct ice_aqc_add_update_free_vsi_resp *resp;
  191. struct ice_aqc_add_get_update_free_vsi *cmd;
  192. struct ice_aq_desc desc;
  193. enum ice_status status;
  194. cmd = &desc.params.vsi_cmd;
  195. resp = &desc.params.add_update_free_vsi_res;
  196. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
  197. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
  198. if (keep_vsi_alloc)
  199. cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
  200. status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
  201. if (!status) {
  202. vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
  203. vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
  204. }
  205. return status;
  206. }
  207. /**
  208. * ice_aq_update_vsi
  209. * @hw: pointer to the hw struct
  210. * @vsi_ctx: pointer to a VSI context struct
  211. * @cd: pointer to command details structure or NULL
  212. *
  213. * Update VSI context in the hardware (0x0211)
  214. */
  215. static enum ice_status
  216. ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  217. struct ice_sq_cd *cd)
  218. {
  219. struct ice_aqc_add_update_free_vsi_resp *resp;
  220. struct ice_aqc_add_get_update_free_vsi *cmd;
  221. struct ice_aq_desc desc;
  222. enum ice_status status;
  223. cmd = &desc.params.vsi_cmd;
  224. resp = &desc.params.add_update_free_vsi_res;
  225. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
  226. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
  227. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  228. status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
  229. sizeof(vsi_ctx->info), cd);
  230. if (!status) {
  231. vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
  232. vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
  233. }
  234. return status;
  235. }
  236. /**
  237. * ice_is_vsi_valid - check whether the VSI is valid or not
  238. * @hw: pointer to the hw struct
  239. * @vsi_handle: VSI handle
  240. *
  241. * check whether the VSI is valid or not
  242. */
  243. bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
  244. {
  245. return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
  246. }
  247. /**
  248. * ice_get_hw_vsi_num - return the hw VSI number
  249. * @hw: pointer to the hw struct
  250. * @vsi_handle: VSI handle
  251. *
  252. * return the hw VSI number
  253. * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
  254. */
  255. u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
  256. {
  257. return hw->vsi_ctx[vsi_handle]->vsi_num;
  258. }
  259. /**
  260. * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
  261. * @hw: pointer to the hw struct
  262. * @vsi_handle: VSI handle
  263. *
  264. * return the VSI context entry for a given VSI handle
  265. */
  266. struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
  267. {
  268. return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
  269. }
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * Save the VSI context entry for a given VSI handle.  No bounds check is
 * done here; callers are expected to pass a handle below ICE_MAX_VSI.
 */
static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
			     struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}
  283. /**
  284. * ice_clear_vsi_ctx - clear the VSI context entry
  285. * @hw: pointer to the hw struct
  286. * @vsi_handle: VSI handle
  287. *
  288. * clear the VSI context entry
  289. */
  290. static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
  291. {
  292. struct ice_vsi_ctx *vsi;
  293. vsi = ice_get_vsi_ctx(hw, vsi_handle);
  294. if (vsi) {
  295. devm_kfree(ice_hw_to_dev(hw), vsi);
  296. hw->vsi_ctx[vsi_handle] = NULL;
  297. }
  298. }
  299. /**
  300. * ice_add_vsi - add VSI context to the hardware and VSI handle list
  301. * @hw: pointer to the hw struct
  302. * @vsi_handle: unique VSI handle provided by drivers
  303. * @vsi_ctx: pointer to a VSI context struct
  304. * @cd: pointer to command details structure or NULL
  305. *
  306. * Add a VSI context to the hardware also add it into the VSI handle list.
  307. * If this function gets called after reset for existing VSIs then update
  308. * with the new HW VSI number in the corresponding VSI handle list entry.
  309. */
  310. enum ice_status
  311. ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
  312. struct ice_sq_cd *cd)
  313. {
  314. struct ice_vsi_ctx *tmp_vsi_ctx;
  315. enum ice_status status;
  316. if (vsi_handle >= ICE_MAX_VSI)
  317. return ICE_ERR_PARAM;
  318. status = ice_aq_add_vsi(hw, vsi_ctx, cd);
  319. if (status)
  320. return status;
  321. tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
  322. if (!tmp_vsi_ctx) {
  323. /* Create a new vsi context */
  324. tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
  325. sizeof(*tmp_vsi_ctx), GFP_KERNEL);
  326. if (!tmp_vsi_ctx) {
  327. ice_aq_free_vsi(hw, vsi_ctx, false, cd);
  328. return ICE_ERR_NO_MEMORY;
  329. }
  330. *tmp_vsi_ctx = *vsi_ctx;
  331. ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
  332. } else {
  333. /* update with new HW VSI num */
  334. if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
  335. tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
  336. }
  337. return status;
  338. }
  339. /**
  340. * ice_free_vsi- free VSI context from hardware and VSI handle list
  341. * @hw: pointer to the hw struct
  342. * @vsi_handle: unique VSI handle
  343. * @vsi_ctx: pointer to a VSI context struct
  344. * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
  345. * @cd: pointer to command details structure or NULL
  346. *
  347. * Free VSI context info from hardware as well as from VSI handle list
  348. */
  349. enum ice_status
  350. ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
  351. bool keep_vsi_alloc, struct ice_sq_cd *cd)
  352. {
  353. enum ice_status status;
  354. if (!ice_is_vsi_valid(hw, vsi_handle))
  355. return ICE_ERR_PARAM;
  356. vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
  357. status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
  358. if (!status)
  359. ice_clear_vsi_ctx(hw, vsi_handle);
  360. return status;
  361. }
  362. /**
  363. * ice_update_vsi
  364. * @hw: pointer to the hw struct
  365. * @vsi_handle: unique VSI handle
  366. * @vsi_ctx: pointer to a VSI context struct
  367. * @cd: pointer to command details structure or NULL
  368. *
  369. * Update VSI context in the hardware
  370. */
  371. enum ice_status
  372. ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
  373. struct ice_sq_cd *cd)
  374. {
  375. if (!ice_is_vsi_valid(hw, vsi_handle))
  376. return ICE_ERR_PARAM;
  377. vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
  378. return ice_aq_update_vsi(hw, vsi_ctx, cd);
  379. }
  380. /**
  381. * ice_aq_alloc_free_vsi_list
  382. * @hw: pointer to the hw struct
  383. * @vsi_list_id: VSI list id returned or used for lookup
  384. * @lkup_type: switch rule filter lookup type
  385. * @opc: switch rules population command type - pass in the command opcode
  386. *
  387. * allocates or free a VSI list resource
  388. */
  389. static enum ice_status
  390. ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
  391. enum ice_sw_lkup_type lkup_type,
  392. enum ice_adminq_opc opc)
  393. {
  394. struct ice_aqc_alloc_free_res_elem *sw_buf;
  395. struct ice_aqc_res_elem *vsi_ele;
  396. enum ice_status status;
  397. u16 buf_len;
  398. buf_len = sizeof(*sw_buf);
  399. sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
  400. if (!sw_buf)
  401. return ICE_ERR_NO_MEMORY;
  402. sw_buf->num_elems = cpu_to_le16(1);
  403. if (lkup_type == ICE_SW_LKUP_MAC ||
  404. lkup_type == ICE_SW_LKUP_MAC_VLAN ||
  405. lkup_type == ICE_SW_LKUP_ETHERTYPE ||
  406. lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
  407. lkup_type == ICE_SW_LKUP_PROMISC ||
  408. lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
  409. sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
  410. } else if (lkup_type == ICE_SW_LKUP_VLAN) {
  411. sw_buf->res_type =
  412. cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
  413. } else {
  414. status = ICE_ERR_PARAM;
  415. goto ice_aq_alloc_free_vsi_list_exit;
  416. }
  417. if (opc == ice_aqc_opc_free_res)
  418. sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
  419. status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
  420. if (status)
  421. goto ice_aq_alloc_free_vsi_list_exit;
  422. if (opc == ice_aqc_opc_alloc_res) {
  423. vsi_ele = &sw_buf->elem[0];
  424. *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
  425. }
  426. ice_aq_alloc_free_vsi_list_exit:
  427. devm_kfree(ice_hw_to_dev(hw), sw_buf);
  428. return status;
  429. }
  430. /**
  431. * ice_aq_sw_rules - add/update/remove switch rules
  432. * @hw: pointer to the hw struct
  433. * @rule_list: pointer to switch rule population list
  434. * @rule_list_sz: total size of the rule list in bytes
  435. * @num_rules: number of switch rules in the rule_list
  436. * @opc: switch rules population command type - pass in the command opcode
  437. * @cd: pointer to command details structure or NULL
  438. *
  439. * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
  440. */
  441. static enum ice_status
  442. ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
  443. u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  444. {
  445. struct ice_aq_desc desc;
  446. if (opc != ice_aqc_opc_add_sw_rules &&
  447. opc != ice_aqc_opc_update_sw_rules &&
  448. opc != ice_aqc_opc_remove_sw_rules)
  449. return ICE_ERR_PARAM;
  450. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  451. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  452. desc.params.sw_rules.num_rules_fltr_entry_index =
  453. cpu_to_le16(num_rules);
  454. return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
  455. }
  456. /* ice_init_port_info - Initialize port_info with switch configuration data
  457. * @pi: pointer to port_info
  458. * @vsi_port_num: VSI number or port number
  459. * @type: Type of switch element (port or VSI)
  460. * @swid: switch ID of the switch the element is attached to
  461. * @pf_vf_num: PF or VF number
  462. * @is_vf: true if the element is a VF, false otherwise
  463. */
  464. static void
  465. ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
  466. u16 swid, u16 pf_vf_num, bool is_vf)
  467. {
  468. switch (type) {
  469. case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
  470. pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
  471. pi->sw_id = swid;
  472. pi->pf_vf_num = pf_vf_num;
  473. pi->is_vf = is_vf;
  474. pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
  475. pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
  476. break;
  477. default:
  478. ice_debug(pi->hw, ICE_DBG_SW,
  479. "incorrect VSI/port type received\n");
  480. break;
  481. }
  482. }
  483. /* ice_get_initial_sw_cfg - Get initial port and default VSI data
  484. * @hw: pointer to the hardware structure
  485. */
  486. enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
  487. {
  488. struct ice_aqc_get_sw_cfg_resp *rbuf;
  489. enum ice_status status;
  490. u16 req_desc = 0;
  491. u16 num_elems;
  492. u16 i;
  493. rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
  494. GFP_KERNEL);
  495. if (!rbuf)
  496. return ICE_ERR_NO_MEMORY;
  497. /* Multiple calls to ice_aq_get_sw_cfg may be required
  498. * to get all the switch configuration information. The need
  499. * for additional calls is indicated by ice_aq_get_sw_cfg
  500. * writing a non-zero value in req_desc
  501. */
  502. do {
  503. status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
  504. &req_desc, &num_elems, NULL);
  505. if (status)
  506. break;
  507. for (i = 0; i < num_elems; i++) {
  508. struct ice_aqc_get_sw_cfg_resp_elem *ele;
  509. u16 pf_vf_num, swid, vsi_port_num;
  510. bool is_vf = false;
  511. u8 type;
  512. ele = rbuf[i].elements;
  513. vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
  514. ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
  515. pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
  516. ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
  517. swid = le16_to_cpu(ele->swid);
  518. if (le16_to_cpu(ele->pf_vf_num) &
  519. ICE_AQC_GET_SW_CONF_RESP_IS_VF)
  520. is_vf = true;
  521. type = le16_to_cpu(ele->vsi_port_num) >>
  522. ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
  523. if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
  524. /* FW VSI is not needed. Just continue. */
  525. continue;
  526. }
  527. ice_init_port_info(hw->port_info, vsi_port_num,
  528. type, swid, pf_vf_num, is_vf);
  529. }
  530. } while (req_desc && !status);
  531. devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
  532. return status;
  533. }
  534. /**
  535. * ice_fill_sw_info - Helper function to populate lb_en and lan_en
  536. * @hw: pointer to the hardware structure
  537. * @f_info: filter info structure to fill/update
  538. *
  539. * This helper function populates the lb_en and lan_en elements of the provided
  540. * ice_fltr_info struct using the switch's type and characteristics of the
  541. * switch rule being configured.
  542. */
  543. static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
  544. {
  545. f_info->lb_en = false;
  546. f_info->lan_en = false;
  547. if ((f_info->flag & ICE_FLTR_TX) &&
  548. (f_info->fltr_act == ICE_FWD_TO_VSI ||
  549. f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
  550. f_info->fltr_act == ICE_FWD_TO_Q ||
  551. f_info->fltr_act == ICE_FWD_TO_QGRP)) {
  552. f_info->lb_en = true;
  553. if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
  554. is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
  555. f_info->lan_en = true;
  556. }
  557. }
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Translates the generic filter description in @f_info into the admin queue
 * lookup rule layout: the 32-bit action word, recipe id, source, and a dummy
 * Ethernet header whose DA/ethertype/VLAN fields carry the match data.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;	/* sentinel: "no VLAN to write" */
	void *daddr = NULL;			/* dest MAC to patch in, if any */
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		/* Removal only needs the rule index; action and header
		 * length are zeroed and the rest of the buffer is unused.
		 */
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);

	/* Derive lan_en/lb_en flags from the filter before encoding them */
	ice_fill_sw_info(hw, f_info);

	/* Encode the forwarding action into the single-action word */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		/* VLAN lookups set the forwarding/valid bits in the
		 * prune-action path below instead
		 */
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
		break;
	default:
		/* Unknown action: leave the rule unfilled */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Select which match fields get written into the dummy header */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* VLAN forwarding rules double as prune rules */
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* Only write the VLAN TCI when a valid VLAN id was selected above */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource id
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 *
 * Returns 0 on success; ICE_ERR_PARAM for non-MAC lookups, ICE_ERR_NO_MEMORY
 * on allocation failure, or the AQ status from ice_aq_sw_rules().
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile id
	 * 3. GENERIC VALUE action to hold the software marker id
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* Markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up tx rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* rx_tx is the second rule, laid out directly after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup tx rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action id */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule id of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* Record the marker/large-action binding on success */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
  761. /**
  762. * ice_create_vsi_list_map
  763. * @hw: pointer to the hardware structure
  764. * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
  765. * @num_vsi: number of VSI handles in the array
  766. * @vsi_list_id: VSI list id generated as part of allocate resource
  767. *
  768. * Helper function to create a new entry of VSI list id to VSI mapping
  769. * using the given VSI list id
  770. */
  771. static struct ice_vsi_list_map_info *
  772. ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
  773. u16 vsi_list_id)
  774. {
  775. struct ice_switch_info *sw = hw->switch_info;
  776. struct ice_vsi_list_map_info *v_map;
  777. int i;
  778. v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
  779. if (!v_map)
  780. return NULL;
  781. v_map->vsi_list_id = vsi_list_id;
  782. v_map->ref_cnt = 1;
  783. for (i = 0; i < num_vsi; i++)
  784. set_bit(vsi_handle_arr[i], v_map->vsi_map);
  785. list_add(&v_map->list_entry, &sw->vsi_list_map_head);
  786. return v_map;
  787. }
/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list id generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list id
 *
 * Returns 0 on success; ICE_ERR_PARAM for an empty array, an unsupported
 * lookup type, or an invalid VSI handle; ICE_ERR_NO_MEMORY on allocation
 * failure; otherwise the AQ status.
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	/* Map the lookup type to the matching set/clear rule type:
	 * MAC-family lookups use VSI lists, VLAN uses prune lists.
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	/* Rule size scales with the number of VSIs in the list */
	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = cpu_to_le16(type);
	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
  847. /**
  848. * ice_create_vsi_list_rule - Creates and populates a VSI list rule
  849. * @hw: pointer to the hw struct
  850. * @vsi_handle_arr: array of VSI handles to form a VSI list
  851. * @num_vsi: number of VSI handles in the array
  852. * @vsi_list_id: stores the ID of the VSI list to be created
  853. * @lkup_type: switch rule filter's lookup type
  854. */
  855. static enum ice_status
  856. ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
  857. u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
  858. {
  859. enum ice_status status;
  860. status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
  861. ice_aqc_opc_alloc_res);
  862. if (status)
  863. return status;
  864. /* Update the newly created VSI list to include the specified VSIs */
  865. return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
  866. *vsi_list_id, false,
  867. ice_aqc_opc_add_sw_rules, lkup_type);
  868. }
/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 *
 * On success the firmware-assigned rule id is written back into both
 * @f_entry and the new management entry, and the management entry is
 * linked onto the recipe's filt_rules list.
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		/* AQ add failed: drop the management entry before exiting */
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	/* Firmware returns the rule id in the submitted buffer; propagate
	 * it to the caller's entry and our bookkeeping copy.
	 */
	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
  925. /**
  926. * ice_update_pkt_fwd_rule
  927. * @hw: pointer to the hardware structure
  928. * @f_info: filter information for switch rule
  929. *
  930. * Call AQ command to update a previously created switch rule with a
  931. * VSI list id
  932. */
  933. static enum ice_status
  934. ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
  935. {
  936. struct ice_aqc_sw_rules_elem *s_rule;
  937. enum ice_status status;
  938. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  939. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
  940. if (!s_rule)
  941. return ICE_ERR_NO_MEMORY;
  942. ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
  943. s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
  944. /* Update switch rule with new rule set to forward VSI list */
  945. status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
  946. ice_aqc_opc_update_sw_rules, NULL);
  947. devm_kfree(ice_hw_to_dev(hw), s_rule);
  948. return status;
  949. }
/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the hw struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 *
 * Walks every MAC filter rule under the MAC recipe lock and re-issues
 * an update for unicast Tx forwarding rules; stops at the first failure
 * and returns that status.
 */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);
	return status;
}
  985. /**
  986. * ice_add_update_vsi_list
  987. * @hw: pointer to the hardware structure
  988. * @m_entry: pointer to current filter management list entry
  989. * @cur_fltr: filter information from the book keeping entry
  990. * @new_fltr: filter information with the new VSI to be added
  991. *
  992. * Call AQ command to add or update previously created VSI list with new VSI.
  993. *
  994. * Helper function to do book keeping associated with adding filter information
  995. * The algorithm to do the booking keeping is described below :
  996. * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
  997. * if only one VSI has been added till now
  998. * Allocate a new VSI list and add two VSIs
  999. * to this list using switch rule command
  1000. * Update the previously created switch rule with the
  1001. * newly created VSI list id
  1002. * if a VSI list was previously created
  1003. * Add the new VSI to the previously created VSI list set
  1004. * using the update switch rule command
  1005. */
  1006. static enum ice_status
  1007. ice_add_update_vsi_list(struct ice_hw *hw,
  1008. struct ice_fltr_mgmt_list_entry *m_entry,
  1009. struct ice_fltr_info *cur_fltr,
  1010. struct ice_fltr_info *new_fltr)
  1011. {
  1012. enum ice_status status = 0;
  1013. u16 vsi_list_id = 0;
  1014. if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
  1015. cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
  1016. return ICE_ERR_NOT_IMPL;
  1017. if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
  1018. new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
  1019. (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
  1020. cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
  1021. return ICE_ERR_NOT_IMPL;
  1022. if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
  1023. /* Only one entry existed in the mapping and it was not already
  1024. * a part of a VSI list. So, create a VSI list with the old and
  1025. * new VSIs.
  1026. */
  1027. struct ice_fltr_info tmp_fltr;
  1028. u16 vsi_handle_arr[2];
  1029. /* A rule already exists with the new VSI being added */
  1030. if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
  1031. return ICE_ERR_ALREADY_EXISTS;
  1032. vsi_handle_arr[0] = cur_fltr->vsi_handle;
  1033. vsi_handle_arr[1] = new_fltr->vsi_handle;
  1034. status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
  1035. &vsi_list_id,
  1036. new_fltr->lkup_type);
  1037. if (status)
  1038. return status;
  1039. tmp_fltr = *new_fltr;
  1040. tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
  1041. tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
  1042. tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
  1043. /* Update the previous switch rule of "MAC forward to VSI" to
  1044. * "MAC fwd to VSI list"
  1045. */
  1046. status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
  1047. if (status)
  1048. return status;
  1049. cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
  1050. cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
  1051. m_entry->vsi_list_info =
  1052. ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
  1053. vsi_list_id);
  1054. /* If this entry was large action then the large action needs
  1055. * to be updated to point to FWD to VSI list
  1056. */
  1057. if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
  1058. status =
  1059. ice_add_marker_act(hw, m_entry,
  1060. m_entry->sw_marker_id,
  1061. m_entry->lg_act_idx);
  1062. } else {
  1063. u16 vsi_handle = new_fltr->vsi_handle;
  1064. enum ice_adminq_opc opcode;
  1065. /* A rule already exists with the new VSI being added */
  1066. if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
  1067. return 0;
  1068. /* Update the previously created VSI list set with
  1069. * the new VSI id passed in
  1070. */
  1071. vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
  1072. opcode = ice_aqc_opc_update_sw_rules;
  1073. status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
  1074. vsi_list_id, false, opcode,
  1075. new_fltr->lkup_type);
  1076. /* update VSI list mapping info with new VSI id */
  1077. if (!status)
  1078. set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
  1079. }
  1080. if (!status)
  1081. m_entry->vsi_count++;
  1082. return status;
  1083. }
  1084. /**
  1085. * ice_find_rule_entry - Search a rule entry
  1086. * @hw: pointer to the hardware structure
  1087. * @recp_id: lookup type for which the specified rule needs to be searched
  1088. * @f_info: rule information
  1089. *
  1090. * Helper function to search for a given rule entry
  1091. * Returns pointer to entry storing the rule if found
  1092. */
  1093. static struct ice_fltr_mgmt_list_entry *
  1094. ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
  1095. {
  1096. struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
  1097. struct ice_switch_info *sw = hw->switch_info;
  1098. struct list_head *list_head;
  1099. list_head = &sw->recp_list[recp_id].filt_rules;
  1100. list_for_each_entry(list_itr, list_head, list_entry) {
  1101. if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
  1102. sizeof(f_info->l_data)) &&
  1103. f_info->flag == list_itr->fltr_info.flag) {
  1104. ret = list_itr;
  1105. break;
  1106. }
  1107. }
  1108. return ret;
  1109. }
  1110. /**
  1111. * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
  1112. * @hw: pointer to the hardware structure
  1113. * @recp_id: lookup type for which VSI lists needs to be searched
  1114. * @vsi_handle: VSI handle to be found in VSI list
  1115. * @vsi_list_id: VSI list id found containing vsi_handle
  1116. *
  1117. * Helper function to search a VSI list with single entry containing given VSI
  1118. * handle element. This can be extended further to search VSI list with more
  1119. * than 1 vsi_count. Returns pointer to VSI list entry if found.
  1120. */
  1121. static struct ice_vsi_list_map_info *
  1122. ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
  1123. u16 *vsi_list_id)
  1124. {
  1125. struct ice_vsi_list_map_info *map_info = NULL;
  1126. struct ice_switch_info *sw = hw->switch_info;
  1127. struct ice_fltr_mgmt_list_entry *list_itr;
  1128. struct list_head *list_head;
  1129. list_head = &sw->recp_list[recp_id].filt_rules;
  1130. list_for_each_entry(list_itr, list_head, list_entry) {
  1131. if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
  1132. map_info = list_itr->vsi_list_info;
  1133. if (test_bit(vsi_handle, map_info->vsi_map)) {
  1134. *vsi_list_id = map_info->vsi_list_id;
  1135. return map_info;
  1136. }
  1137. }
  1138. }
  1139. return NULL;
  1140. }
/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe id) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 *
 * If no matching rule exists yet, a brand-new forwarding rule is created;
 * otherwise the existing rule is extended with the new VSI via a VSI list.
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* Source field depends on direction: port for Rx, VSI for Tx */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		/* No existing rule: drop the lock before creating a fresh
		 * forwarding rule (ice_create_pkt_fwd_rule takes no lock
		 * itself; the list insert is protected by the AQ path).
		 */
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}
  1179. /**
  1180. * ice_remove_vsi_list_rule
  1181. * @hw: pointer to the hardware structure
  1182. * @vsi_list_id: VSI list id generated as part of allocate resource
  1183. * @lkup_type: switch rule filter lookup type
  1184. *
  1185. * The VSI list should be emptied before this function is called to remove the
  1186. * VSI list.
  1187. */
  1188. static enum ice_status
  1189. ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
  1190. enum ice_sw_lkup_type lkup_type)
  1191. {
  1192. struct ice_aqc_sw_rules_elem *s_rule;
  1193. enum ice_status status;
  1194. u16 s_rule_size;
  1195. s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
  1196. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  1197. if (!s_rule)
  1198. return ICE_ERR_NO_MEMORY;
  1199. s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
  1200. s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
  1201. /* Free the vsi_list resource that we allocated. It is assumed that the
  1202. * list is empty at this point.
  1203. */
  1204. status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
  1205. ice_aqc_opc_free_res);
  1206. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1207. return status;
  1208. }
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the rule's VSI list. When the list shrinks to a
 * single member (or becomes empty for VLAN prune lists) the list itself is
 * torn down and the rule is converted back to a direct FWD_TO_VSI action.
 *
 * NOTE(review): fm_list->vsi_list_info is dereferenced without a NULL check;
 * the visible caller (ice_remove_rule_internal) verifies it first — confirm
 * any new callers uphold that precondition.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;

	/* Ask firmware to clear this VSI out of the list */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* Collapse the list when only one member remains (non-VLAN) or when
	 * it is fully empty (VLAN prune lists).
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* The sole surviving member becomes the direct target */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status)
			return status;

		/* Change the list entry action from VSI_LIST to VSI */
		fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
		fm_list->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->fltr_info.vsi_handle = rem_vsi_handle;

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
  1268. /**
  1269. * ice_remove_rule_internal - Remove a filter rule of a given type
  1270. * @hw: pointer to the hardware structure
  1271. * @recp_id: recipe id for which the rule needs to removed
  1272. * @f_entry: rule entry containing filter information
  1273. */
  1274. static enum ice_status
  1275. ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
  1276. struct ice_fltr_list_entry *f_entry)
  1277. {
  1278. struct ice_switch_info *sw = hw->switch_info;
  1279. struct ice_fltr_mgmt_list_entry *list_elem;
  1280. struct mutex *rule_lock; /* Lock to protect filter rule list */
  1281. enum ice_status status = 0;
  1282. bool remove_rule = false;
  1283. u16 vsi_handle;
  1284. if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
  1285. return ICE_ERR_PARAM;
  1286. f_entry->fltr_info.fwd_id.hw_vsi_id =
  1287. ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
  1288. rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
  1289. mutex_lock(rule_lock);
  1290. list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
  1291. if (!list_elem) {
  1292. status = ICE_ERR_DOES_NOT_EXIST;
  1293. goto exit;
  1294. }
  1295. if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
  1296. remove_rule = true;
  1297. } else if (!list_elem->vsi_list_info) {
  1298. status = ICE_ERR_DOES_NOT_EXIST;
  1299. goto exit;
  1300. } else {
  1301. if (list_elem->vsi_list_info->ref_cnt > 1)
  1302. list_elem->vsi_list_info->ref_cnt--;
  1303. vsi_handle = f_entry->fltr_info.vsi_handle;
  1304. status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
  1305. if (status)
  1306. goto exit;
  1307. /* if vsi count goes to zero after updating the vsi list */
  1308. if (list_elem->vsi_count == 0)
  1309. remove_rule = true;
  1310. }
  1311. if (remove_rule) {
  1312. /* Remove the lookup rule */
  1313. struct ice_aqc_sw_rules_elem *s_rule;
  1314. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  1315. ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
  1316. GFP_KERNEL);
  1317. if (!s_rule) {
  1318. status = ICE_ERR_NO_MEMORY;
  1319. goto exit;
  1320. }
  1321. ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
  1322. ice_aqc_opc_remove_sw_rules);
  1323. status = ice_aq_sw_rules(hw, s_rule,
  1324. ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
  1325. ice_aqc_opc_remove_sw_rules, NULL);
  1326. if (status)
  1327. goto exit;
  1328. /* Remove a book keeping from the list */
  1329. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1330. list_del(&list_elem->list_entry);
  1331. devm_kfree(ice_hw_to_dev(hw), list_elem);
  1332. }
  1333. exit:
  1334. mutex_unlock(rule_lock);
  1335. return status;
  1336. }
  1337. /**
  1338. * ice_add_mac - Add a MAC address based filter rule
  1339. * @hw: pointer to the hardware structure
  1340. * @m_list: list of MAC addresses and forwarding information
  1341. *
  1342. * IMPORTANT: When the ucast_shared flag is set to false and m_list has
  1343. * multiple unicast addresses, the function assumes that all the
  1344. * addresses are unique in a given add_mac call. It doesn't
  1345. * check for duplicates in this case, removing duplicates from a given
  1346. * list should be taken care of in the caller of this function.
  1347. */
  1348. enum ice_status
  1349. ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
  1350. {
  1351. struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
  1352. struct ice_fltr_list_entry *m_list_itr;
  1353. struct list_head *rule_head;
  1354. u16 elem_sent, total_elem_left;
  1355. struct ice_switch_info *sw;
  1356. struct mutex *rule_lock; /* Lock to protect filter rule list */
  1357. enum ice_status status = 0;
  1358. u16 num_unicast = 0;
  1359. u16 s_rule_size;
  1360. if (!m_list || !hw)
  1361. return ICE_ERR_PARAM;
  1362. s_rule = NULL;
  1363. sw = hw->switch_info;
  1364. rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
  1365. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1366. u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
  1367. u16 vsi_handle;
  1368. u16 hw_vsi_id;
  1369. m_list_itr->fltr_info.flag = ICE_FLTR_TX;
  1370. vsi_handle = m_list_itr->fltr_info.vsi_handle;
  1371. if (!ice_is_vsi_valid(hw, vsi_handle))
  1372. return ICE_ERR_PARAM;
  1373. hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
  1374. m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
  1375. /* update the src in case it is vsi num */
  1376. if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
  1377. return ICE_ERR_PARAM;
  1378. m_list_itr->fltr_info.src = hw_vsi_id;
  1379. if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
  1380. is_zero_ether_addr(add))
  1381. return ICE_ERR_PARAM;
  1382. if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
  1383. /* Don't overwrite the unicast address */
  1384. mutex_lock(rule_lock);
  1385. if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
  1386. &m_list_itr->fltr_info)) {
  1387. mutex_unlock(rule_lock);
  1388. return ICE_ERR_ALREADY_EXISTS;
  1389. }
  1390. mutex_unlock(rule_lock);
  1391. num_unicast++;
  1392. } else if (is_multicast_ether_addr(add) ||
  1393. (is_unicast_ether_addr(add) && hw->ucast_shared)) {
  1394. m_list_itr->status =
  1395. ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
  1396. m_list_itr);
  1397. if (m_list_itr->status)
  1398. return m_list_itr->status;
  1399. }
  1400. }
  1401. mutex_lock(rule_lock);
  1402. /* Exit if no suitable entries were found for adding bulk switch rule */
  1403. if (!num_unicast) {
  1404. status = 0;
  1405. goto ice_add_mac_exit;
  1406. }
  1407. rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
  1408. /* Allocate switch rule buffer for the bulk update for unicast */
  1409. s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
  1410. s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
  1411. GFP_KERNEL);
  1412. if (!s_rule) {
  1413. status = ICE_ERR_NO_MEMORY;
  1414. goto ice_add_mac_exit;
  1415. }
  1416. r_iter = s_rule;
  1417. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1418. struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
  1419. u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
  1420. if (is_unicast_ether_addr(mac_addr)) {
  1421. ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
  1422. ice_aqc_opc_add_sw_rules);
  1423. r_iter = (struct ice_aqc_sw_rules_elem *)
  1424. ((u8 *)r_iter + s_rule_size);
  1425. }
  1426. }
  1427. /* Call AQ bulk switch rule update for all unicast addresses */
  1428. r_iter = s_rule;
  1429. /* Call AQ switch rule in AQ_MAX chunk */
  1430. for (total_elem_left = num_unicast; total_elem_left > 0;
  1431. total_elem_left -= elem_sent) {
  1432. struct ice_aqc_sw_rules_elem *entry = r_iter;
  1433. elem_sent = min(total_elem_left,
  1434. (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
  1435. status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
  1436. elem_sent, ice_aqc_opc_add_sw_rules,
  1437. NULL);
  1438. if (status)
  1439. goto ice_add_mac_exit;
  1440. r_iter = (struct ice_aqc_sw_rules_elem *)
  1441. ((u8 *)r_iter + (elem_sent * s_rule_size));
  1442. }
  1443. /* Fill up rule id based on the value returned from FW */
  1444. r_iter = s_rule;
  1445. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1446. struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
  1447. u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
  1448. struct ice_fltr_mgmt_list_entry *fm_entry;
  1449. if (is_unicast_ether_addr(mac_addr)) {
  1450. f_info->fltr_rule_id =
  1451. le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
  1452. f_info->fltr_act = ICE_FWD_TO_VSI;
  1453. /* Create an entry to track this MAC address */
  1454. fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
  1455. sizeof(*fm_entry), GFP_KERNEL);
  1456. if (!fm_entry) {
  1457. status = ICE_ERR_NO_MEMORY;
  1458. goto ice_add_mac_exit;
  1459. }
  1460. fm_entry->fltr_info = *f_info;
  1461. fm_entry->vsi_count = 1;
  1462. /* The book keeping entries will get removed when
  1463. * base driver calls remove filter AQ command
  1464. */
  1465. list_add(&fm_entry->list_entry, rule_head);
  1466. r_iter = (struct ice_aqc_sw_rules_elem *)
  1467. ((u8 *)r_iter + s_rule_size);
  1468. }
  1469. }
  1470. ice_add_mac_exit:
  1471. mutex_unlock(rule_lock);
  1472. if (s_rule)
  1473. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1474. return status;
  1475. }
  1476. /**
  1477. * ice_add_vlan_internal - Add one VLAN based filter rule
  1478. * @hw: pointer to the hardware structure
  1479. * @f_entry: filter entry containing one VLAN information
  1480. */
  1481. static enum ice_status
  1482. ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
  1483. {
  1484. struct ice_switch_info *sw = hw->switch_info;
  1485. struct ice_fltr_mgmt_list_entry *v_list_itr;
  1486. struct ice_fltr_info *new_fltr, *cur_fltr;
  1487. enum ice_sw_lkup_type lkup_type;
  1488. u16 vsi_list_id = 0, vsi_handle;
  1489. struct mutex *rule_lock; /* Lock to protect filter rule list */
  1490. enum ice_status status = 0;
  1491. if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
  1492. return ICE_ERR_PARAM;
  1493. f_entry->fltr_info.fwd_id.hw_vsi_id =
  1494. ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
  1495. new_fltr = &f_entry->fltr_info;
  1496. /* VLAN id should only be 12 bits */
  1497. if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
  1498. return ICE_ERR_PARAM;
  1499. if (new_fltr->src_id != ICE_SRC_ID_VSI)
  1500. return ICE_ERR_PARAM;
  1501. new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
  1502. lkup_type = new_fltr->lkup_type;
  1503. vsi_handle = new_fltr->vsi_handle;
  1504. rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
  1505. mutex_lock(rule_lock);
  1506. v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
  1507. if (!v_list_itr) {
  1508. struct ice_vsi_list_map_info *map_info = NULL;
  1509. if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
  1510. /* All VLAN pruning rules use a VSI list. Check if
  1511. * there is already a VSI list containing VSI that we
  1512. * want to add. If found, use the same vsi_list_id for
  1513. * this new VLAN rule or else create a new list.
  1514. */
  1515. map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
  1516. vsi_handle,
  1517. &vsi_list_id);
  1518. if (!map_info) {
  1519. status = ice_create_vsi_list_rule(hw,
  1520. &vsi_handle,
  1521. 1,
  1522. &vsi_list_id,
  1523. lkup_type);
  1524. if (status)
  1525. goto exit;
  1526. }
  1527. /* Convert the action to forwarding to a VSI list. */
  1528. new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
  1529. new_fltr->fwd_id.vsi_list_id = vsi_list_id;
  1530. }
  1531. status = ice_create_pkt_fwd_rule(hw, f_entry);
  1532. if (!status) {
  1533. v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
  1534. new_fltr);
  1535. if (!v_list_itr) {
  1536. status = ICE_ERR_DOES_NOT_EXIST;
  1537. goto exit;
  1538. }
  1539. /* reuse VSI list for new rule and increment ref_cnt */
  1540. if (map_info) {
  1541. v_list_itr->vsi_list_info = map_info;
  1542. map_info->ref_cnt++;
  1543. } else {
  1544. v_list_itr->vsi_list_info =
  1545. ice_create_vsi_list_map(hw, &vsi_handle,
  1546. 1, vsi_list_id);
  1547. }
  1548. }
  1549. } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
  1550. /* Update existing VSI list to add new VSI id only if it used
  1551. * by one VLAN rule.
  1552. */
  1553. cur_fltr = &v_list_itr->fltr_info;
  1554. status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
  1555. new_fltr);
  1556. } else {
  1557. /* If VLAN rule exists and VSI list being used by this rule is
  1558. * referenced by more than 1 VLAN rule. Then create a new VSI
  1559. * list appending previous VSI with new VSI and update existing
  1560. * VLAN rule to point to new VSI list id
  1561. */
  1562. struct ice_fltr_info tmp_fltr;
  1563. u16 vsi_handle_arr[2];
  1564. u16 cur_handle;
  1565. /* Current implementation only supports reusing VSI list with
  1566. * one VSI count. We should never hit below condition
  1567. */
  1568. if (v_list_itr->vsi_count > 1 &&
  1569. v_list_itr->vsi_list_info->ref_cnt > 1) {
  1570. ice_debug(hw, ICE_DBG_SW,
  1571. "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
  1572. status = ICE_ERR_CFG;
  1573. goto exit;
  1574. }
  1575. cur_handle =
  1576. find_first_bit(v_list_itr->vsi_list_info->vsi_map,
  1577. ICE_MAX_VSI);
  1578. /* A rule already exists with the new VSI being added */
  1579. if (cur_handle == vsi_handle) {
  1580. status = ICE_ERR_ALREADY_EXISTS;
  1581. goto exit;
  1582. }
  1583. vsi_handle_arr[0] = cur_handle;
  1584. vsi_handle_arr[1] = vsi_handle;
  1585. status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
  1586. &vsi_list_id, lkup_type);
  1587. if (status)
  1588. goto exit;
  1589. tmp_fltr = v_list_itr->fltr_info;
  1590. tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
  1591. tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
  1592. tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
  1593. /* Update the previous switch rule to a new VSI list which
  1594. * includes current VSI thats requested
  1595. */
  1596. status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
  1597. if (status)
  1598. goto exit;
  1599. /* before overriding VSI list map info. decrement ref_cnt of
  1600. * previous VSI list
  1601. */
  1602. v_list_itr->vsi_list_info->ref_cnt--;
  1603. /* now update to newly created list */
  1604. v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
  1605. v_list_itr->vsi_list_info =
  1606. ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
  1607. vsi_list_id);
  1608. v_list_itr->vsi_count++;
  1609. }
  1610. exit:
  1611. mutex_unlock(rule_lock);
  1612. return status;
  1613. }
  1614. /**
  1615. * ice_add_vlan - Add VLAN based filter rule
  1616. * @hw: pointer to the hardware structure
  1617. * @v_list: list of VLAN entries and forwarding information
  1618. */
  1619. enum ice_status
  1620. ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
  1621. {
  1622. struct ice_fltr_list_entry *v_list_itr;
  1623. if (!v_list || !hw)
  1624. return ICE_ERR_PARAM;
  1625. list_for_each_entry(v_list_itr, v_list, list_entry) {
  1626. if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
  1627. return ICE_ERR_PARAM;
  1628. v_list_itr->fltr_info.flag = ICE_FLTR_TX;
  1629. v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
  1630. if (v_list_itr->status)
  1631. return v_list_itr->status;
  1632. }
  1633. return 0;
  1634. }
  1635. /**
  1636. * ice_rem_sw_rule_info
  1637. * @hw: pointer to the hardware structure
  1638. * @rule_head: pointer to the switch list structure that we want to delete
  1639. */
  1640. static void
  1641. ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
  1642. {
  1643. if (!list_empty(rule_head)) {
  1644. struct ice_fltr_mgmt_list_entry *entry;
  1645. struct ice_fltr_mgmt_list_entry *tmp;
  1646. list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
  1647. list_del(&entry->list_entry);
  1648. devm_kfree(ice_hw_to_dev(hw), entry);
  1649. }
  1650. }
  1651. }
  1652. /**
  1653. * ice_cfg_dflt_vsi - change state of VSI to set/clear default
  1654. * @hw: pointer to the hardware structure
  1655. * @vsi_handle: VSI handle to set as default
  1656. * @set: true to add the above mentioned switch rule, false to remove it
  1657. * @direction: ICE_FLTR_RX or ICE_FLTR_TX
  1658. *
  1659. * add filter rule to set/unset given VSI as default VSI for the switch
  1660. * (represented by swid)
  1661. */
  1662. enum ice_status
  1663. ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
  1664. {
  1665. struct ice_aqc_sw_rules_elem *s_rule;
  1666. struct ice_fltr_info f_info;
  1667. enum ice_adminq_opc opcode;
  1668. enum ice_status status;
  1669. u16 s_rule_size;
  1670. u16 hw_vsi_id;
  1671. if (!ice_is_vsi_valid(hw, vsi_handle))
  1672. return ICE_ERR_PARAM;
  1673. hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
  1674. s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
  1675. ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
  1676. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  1677. if (!s_rule)
  1678. return ICE_ERR_NO_MEMORY;
  1679. memset(&f_info, 0, sizeof(f_info));
  1680. f_info.lkup_type = ICE_SW_LKUP_DFLT;
  1681. f_info.flag = direction;
  1682. f_info.fltr_act = ICE_FWD_TO_VSI;
  1683. f_info.fwd_id.hw_vsi_id = hw_vsi_id;
  1684. if (f_info.flag & ICE_FLTR_RX) {
  1685. f_info.src = hw->port_info->lport;
  1686. f_info.src_id = ICE_SRC_ID_LPORT;
  1687. if (!set)
  1688. f_info.fltr_rule_id =
  1689. hw->port_info->dflt_rx_vsi_rule_id;
  1690. } else if (f_info.flag & ICE_FLTR_TX) {
  1691. f_info.src_id = ICE_SRC_ID_VSI;
  1692. f_info.src = hw_vsi_id;
  1693. if (!set)
  1694. f_info.fltr_rule_id =
  1695. hw->port_info->dflt_tx_vsi_rule_id;
  1696. }
  1697. if (set)
  1698. opcode = ice_aqc_opc_add_sw_rules;
  1699. else
  1700. opcode = ice_aqc_opc_remove_sw_rules;
  1701. ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
  1702. status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
  1703. if (status || !(f_info.flag & ICE_FLTR_TX_RX))
  1704. goto out;
  1705. if (set) {
  1706. u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  1707. if (f_info.flag & ICE_FLTR_TX) {
  1708. hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
  1709. hw->port_info->dflt_tx_vsi_rule_id = index;
  1710. } else if (f_info.flag & ICE_FLTR_RX) {
  1711. hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
  1712. hw->port_info->dflt_rx_vsi_rule_id = index;
  1713. }
  1714. } else {
  1715. if (f_info.flag & ICE_FLTR_TX) {
  1716. hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
  1717. hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
  1718. } else if (f_info.flag & ICE_FLTR_RX) {
  1719. hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
  1720. hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
  1721. }
  1722. }
  1723. out:
  1724. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1725. return status;
  1726. }
  1727. /**
  1728. * ice_remove_mac - remove a MAC address based filter rule
  1729. * @hw: pointer to the hardware structure
  1730. * @m_list: list of MAC addresses and forwarding information
  1731. *
  1732. * This function removes either a MAC filter rule or a specific VSI from a
  1733. * VSI list for a multicast MAC address.
  1734. *
  1735. * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
  1736. * ice_add_mac. Caller should be aware that this call will only work if all
  1737. * the entries passed into m_list were added previously. It will not attempt to
  1738. * do a partial remove of entries that were found.
  1739. */
  1740. enum ice_status
  1741. ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
  1742. {
  1743. struct ice_fltr_list_entry *list_itr;
  1744. if (!m_list)
  1745. return ICE_ERR_PARAM;
  1746. list_for_each_entry(list_itr, m_list, list_entry) {
  1747. enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
  1748. if (l_type != ICE_SW_LKUP_MAC)
  1749. return ICE_ERR_PARAM;
  1750. list_itr->status = ice_remove_rule_internal(hw,
  1751. ICE_SW_LKUP_MAC,
  1752. list_itr);
  1753. if (list_itr->status)
  1754. return list_itr->status;
  1755. }
  1756. return 0;
  1757. }
  1758. /**
  1759. * ice_remove_vlan - Remove VLAN based filter rule
  1760. * @hw: pointer to the hardware structure
  1761. * @v_list: list of VLAN entries and forwarding information
  1762. */
  1763. enum ice_status
  1764. ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
  1765. {
  1766. struct ice_fltr_list_entry *v_list_itr;
  1767. if (!v_list || !hw)
  1768. return ICE_ERR_PARAM;
  1769. list_for_each_entry(v_list_itr, v_list, list_entry) {
  1770. enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
  1771. if (l_type != ICE_SW_LKUP_VLAN)
  1772. return ICE_ERR_PARAM;
  1773. v_list_itr->status = ice_remove_rule_internal(hw,
  1774. ICE_SW_LKUP_VLAN,
  1775. v_list_itr);
  1776. if (v_list_itr->status)
  1777. return v_list_itr->status;
  1778. }
  1779. return 0;
  1780. }
  1781. /**
  1782. * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
  1783. * @fm_entry: filter entry to inspect
  1784. * @vsi_handle: VSI handle to compare with filter info
  1785. */
  1786. static bool
  1787. ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
  1788. {
  1789. return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
  1790. fm_entry->fltr_info.vsi_handle == vsi_handle) ||
  1791. (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
  1792. (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
  1793. }
  1794. /**
  1795. * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
  1796. * @hw: pointer to the hardware structure
  1797. * @vsi_handle: VSI handle to remove filters from
  1798. * @vsi_list_head: pointer to the list to add entry to
  1799. * @fi: pointer to fltr_info of filter entry to copy & add
  1800. *
  1801. * Helper function, used when creating a list of filters to remove from
  1802. * a specific VSI. The entry added to vsi_list_head is a COPY of the
  1803. * original filter entry, with the exception of fltr_info.fltr_act and
  1804. * fltr_info.fwd_id fields. These are set such that later logic can
  1805. * extract which VSI to remove the fltr from, and pass on that information.
  1806. */
  1807. static enum ice_status
  1808. ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
  1809. struct list_head *vsi_list_head,
  1810. struct ice_fltr_info *fi)
  1811. {
  1812. struct ice_fltr_list_entry *tmp;
  1813. /* this memory is freed up in the caller function
  1814. * once filters for this VSI are removed
  1815. */
  1816. tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
  1817. if (!tmp)
  1818. return ICE_ERR_NO_MEMORY;
  1819. tmp->fltr_info = *fi;
  1820. /* Overwrite these fields to indicate which VSI to remove filter from,
  1821. * so find and remove logic can extract the information from the
  1822. * list entries. Note that original entries will still have proper
  1823. * values.
  1824. */
  1825. tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
  1826. tmp->fltr_info.vsi_handle = vsi_handle;
  1827. tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
  1828. list_add(&tmp->list_entry, vsi_list_head);
  1829. return 0;
  1830. }
  1831. /**
  1832. * ice_add_to_vsi_fltr_list - Add VSI filters to the list
  1833. * @hw: pointer to the hardware structure
  1834. * @vsi_handle: VSI handle to remove filters from
  1835. * @lkup_list_head: pointer to the list that has certain lookup type filters
  1836. * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
  1837. *
  1838. * Locates all filters in lkup_list_head that are used by the given VSI,
  1839. * and adds COPIES of those entries to vsi_list_head (intended to be used
  1840. * to remove the listed filters).
  1841. * Note that this means all entries in vsi_list_head must be explicitly
  1842. * deallocated by the caller when done with list.
  1843. */
  1844. static enum ice_status
  1845. ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
  1846. struct list_head *lkup_list_head,
  1847. struct list_head *vsi_list_head)
  1848. {
  1849. struct ice_fltr_mgmt_list_entry *fm_entry;
  1850. enum ice_status status = 0;
  1851. /* check to make sure VSI id is valid and within boundary */
  1852. if (!ice_is_vsi_valid(hw, vsi_handle))
  1853. return ICE_ERR_PARAM;
  1854. list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
  1855. struct ice_fltr_info *fi;
  1856. fi = &fm_entry->fltr_info;
  1857. if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
  1858. continue;
  1859. status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
  1860. vsi_list_head, fi);
  1861. if (status)
  1862. return status;
  1863. }
  1864. return status;
  1865. }
  1866. /**
  1867. * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
  1868. * @hw: pointer to the hardware structure
  1869. * @vsi_handle: VSI handle to remove filters from
  1870. * @lkup: switch rule filter lookup type
  1871. */
  1872. static void
  1873. ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
  1874. enum ice_sw_lkup_type lkup)
  1875. {
  1876. struct ice_switch_info *sw = hw->switch_info;
  1877. struct ice_fltr_list_entry *fm_entry;
  1878. struct list_head remove_list_head;
  1879. struct list_head *rule_head;
  1880. struct ice_fltr_list_entry *tmp;
  1881. struct mutex *rule_lock; /* Lock to protect filter rule list */
  1882. enum ice_status status;
  1883. INIT_LIST_HEAD(&remove_list_head);
  1884. rule_lock = &sw->recp_list[lkup].filt_rule_lock;
  1885. rule_head = &sw->recp_list[lkup].filt_rules;
  1886. mutex_lock(rule_lock);
  1887. status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
  1888. &remove_list_head);
  1889. mutex_unlock(rule_lock);
  1890. if (status)
  1891. return;
  1892. switch (lkup) {
  1893. case ICE_SW_LKUP_MAC:
  1894. ice_remove_mac(hw, &remove_list_head);
  1895. break;
  1896. case ICE_SW_LKUP_VLAN:
  1897. ice_remove_vlan(hw, &remove_list_head);
  1898. break;
  1899. case ICE_SW_LKUP_MAC_VLAN:
  1900. case ICE_SW_LKUP_ETHERTYPE:
  1901. case ICE_SW_LKUP_ETHERTYPE_MAC:
  1902. case ICE_SW_LKUP_PROMISC:
  1903. case ICE_SW_LKUP_DFLT:
  1904. case ICE_SW_LKUP_PROMISC_VLAN:
  1905. case ICE_SW_LKUP_LAST:
  1906. default:
  1907. ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
  1908. break;
  1909. }
  1910. list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
  1911. list_del(&fm_entry->list_entry);
  1912. devm_kfree(ice_hw_to_dev(hw), fm_entry);
  1913. }
  1914. }
  1915. /**
  1916. * ice_remove_vsi_fltr - Remove all filters for a VSI
  1917. * @hw: pointer to the hardware structure
  1918. * @vsi_handle: VSI handle to remove filters from
  1919. */
  1920. void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
  1921. {
  1922. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
  1923. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
  1924. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
  1925. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
  1926. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
  1927. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
  1928. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
  1929. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
  1930. }
  1931. /**
  1932. * ice_replay_vsi_fltr - Replay filters for requested VSI
  1933. * @hw: pointer to the hardware structure
  1934. * @vsi_handle: driver VSI handle
  1935. * @recp_id: Recipe id for which rules need to be replayed
  1936. * @list_head: list for which filters need to be replayed
  1937. *
  1938. * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
  1939. * It is required to pass valid VSI handle.
  1940. */
  1941. static enum ice_status
  1942. ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
  1943. struct list_head *list_head)
  1944. {
  1945. struct ice_fltr_mgmt_list_entry *itr;
  1946. enum ice_status status = 0;
  1947. u16 hw_vsi_id;
  1948. if (list_empty(list_head))
  1949. return status;
  1950. hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
  1951. list_for_each_entry(itr, list_head, list_entry) {
  1952. struct ice_fltr_list_entry f_entry;
  1953. f_entry.fltr_info = itr->fltr_info;
  1954. if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
  1955. itr->fltr_info.vsi_handle == vsi_handle) {
  1956. /* update the src in case it is vsi num */
  1957. if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
  1958. f_entry.fltr_info.src = hw_vsi_id;
  1959. status = ice_add_rule_internal(hw, recp_id, &f_entry);
  1960. if (status)
  1961. goto end;
  1962. continue;
  1963. }
  1964. if (!test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
  1965. continue;
  1966. /* Clearing it so that the logic can add it back */
  1967. clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
  1968. f_entry.fltr_info.vsi_handle = vsi_handle;
  1969. f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
  1970. /* update the src in case it is vsi num */
  1971. if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
  1972. f_entry.fltr_info.src = hw_vsi_id;
  1973. if (recp_id == ICE_SW_LKUP_VLAN)
  1974. status = ice_add_vlan_internal(hw, &f_entry);
  1975. else
  1976. status = ice_add_rule_internal(hw, recp_id, &f_entry);
  1977. if (status)
  1978. goto end;
  1979. }
  1980. end:
  1981. return status;
  1982. }
  1983. /**
  1984. * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
  1985. * @hw: pointer to the hardware structure
  1986. * @vsi_handle: driver VSI handle
  1987. *
  1988. * Replays filters for requested VSI via vsi_handle.
  1989. */
  1990. enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
  1991. {
  1992. struct ice_switch_info *sw = hw->switch_info;
  1993. enum ice_status status = 0;
  1994. u8 i;
  1995. for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
  1996. struct list_head *head;
  1997. head = &sw->recp_list[i].filt_replay_rules;
  1998. status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
  1999. if (status)
  2000. return status;
  2001. }
  2002. return status;
  2003. }
  2004. /**
  2005. * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
  2006. * @hw: pointer to the hw struct
  2007. *
  2008. * Deletes the filter replay rules.
  2009. */
  2010. void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
  2011. {
  2012. struct ice_switch_info *sw = hw->switch_info;
  2013. u8 i;
  2014. if (!sw)
  2015. return;
  2016. for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
  2017. if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
  2018. struct list_head *l_head;
  2019. l_head = &sw->recp_list[i].filt_replay_rules;
  2020. ice_rem_sw_rule_info(hw, l_head);
  2021. }
  2022. }
  2023. }