ice_switch.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. #include "ice_switch.h"
  4. #define ICE_ETH_DA_OFFSET 0
  5. #define ICE_ETH_ETHTYPE_OFFSET 12
  6. #define ICE_ETH_VLAN_TCI_OFFSET 14
  7. #define ICE_MAX_VLAN_ID 0xFFF
  8. /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
  9. * struct to configure any switch filter rules.
  10. * {DA (6 bytes), SA(6 bytes),
  11. * Ether type (2 bytes for header without VLAN tag) OR
  12. * VLAN tag (4 bytes for header with VLAN tag) }
  13. *
  14. * Word on Hardcoded values
  15. * byte 0 = 0x2: to identify it as locally administered DA MAC
  16. * byte 6 = 0x2: to identify it as locally administered SA MAC
  17. * byte 12 = 0x81 & byte 13 = 0x00:
   18. * In case of a VLAN filter the first two bytes define the ether type (0x8100)
   19. * and the remaining two bytes are a placeholder for programming a given VLAN ID
   20. * In case of an Ether type filter it is treated as a header without a VLAN tag
   21. * and bytes 12 and 13 are used to program a given Ether type instead
  22. */
  23. #define DUMMY_ETH_HDR_LEN 16
  24. static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
  25. 0x2, 0, 0, 0, 0, 0,
  26. 0x81, 0, 0, 0};
  27. #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
  28. (sizeof(struct ice_aqc_sw_rules_elem) - \
  29. sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
  30. sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
  31. #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
  32. (sizeof(struct ice_aqc_sw_rules_elem) - \
  33. sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
  34. sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
  35. #define ICE_SW_RULE_LG_ACT_SIZE(n) \
  36. (sizeof(struct ice_aqc_sw_rules_elem) - \
  37. sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
  38. sizeof(struct ice_sw_rule_lg_act) - \
  39. sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
  40. ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
  41. #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
  42. (sizeof(struct ice_aqc_sw_rules_elem) - \
  43. sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
  44. sizeof(struct ice_sw_rule_vsi_list) - \
  45. sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
  46. ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
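/* Illustrative sketch (not part of the upstream driver): the sizing macros
 * above exist because ice_aqc_sw_rules_elem is a variable-length AQ element;
 * callers compute the exact byte count for the rule variant they need, then
 * allocate and send a buffer of that size. The helper name below is
 * hypothetical and only demonstrates the allocation pattern for a VSI list
 * rule carrying num_vsi entries.
 */
static struct ice_aqc_sw_rules_elem *
ice_example_alloc_vsi_list_rule(struct ice_hw *hw, u16 num_vsi, u16 *size)
{
        *size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
        return devm_kzalloc(ice_hw_to_dev(hw), *size, GFP_KERNEL);
}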
  47. /**
  48. * ice_aq_alloc_free_res - command to allocate/free resources
  49. * @hw: pointer to the hw struct
  50. * @num_entries: number of resource entries in buffer
  51. * @buf: Indirect buffer to hold data parameters and response
  52. * @buf_size: size of buffer for indirect commands
  53. * @opc: pass in the command opcode
  54. * @cd: pointer to command details structure or NULL
  55. *
  56. * Helper function to allocate/free resources using the admin queue commands
  57. */
  58. static enum ice_status
  59. ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
  60. struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
  61. enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  62. {
  63. struct ice_aqc_alloc_free_res_cmd *cmd;
  64. struct ice_aq_desc desc;
  65. cmd = &desc.params.sw_res_ctrl;
  66. if (!buf)
  67. return ICE_ERR_PARAM;
  68. if (buf_size < (num_entries * sizeof(buf->elem[0])))
  69. return ICE_ERR_PARAM;
  70. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  71. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  72. cmd->num_entries = cpu_to_le16(num_entries);
  73. return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  74. }
  75. /**
  76. * ice_init_def_sw_recp - initialize the recipe book keeping tables
  77. * @hw: pointer to the hw struct
  78. *
  79. * Allocate memory for the entire recipe table and initialize the structures/
  80. * entries corresponding to basic recipes.
  81. */
  82. enum ice_status
  83. ice_init_def_sw_recp(struct ice_hw *hw)
  84. {
  85. struct ice_sw_recipe *recps;
  86. u8 i;
  87. recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
  88. sizeof(struct ice_sw_recipe), GFP_KERNEL);
  89. if (!recps)
  90. return ICE_ERR_NO_MEMORY;
  91. for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
  92. recps[i].root_rid = i;
  93. INIT_LIST_HEAD(&recps[i].filt_rules);
  94. mutex_init(&recps[i].filt_rule_lock);
  95. }
  96. hw->switch_info->recp_list = recps;
  97. return 0;
  98. }
  99. /**
  100. * ice_aq_get_sw_cfg - get switch configuration
  101. * @hw: pointer to the hardware structure
  102. * @buf: pointer to the result buffer
  103. * @buf_size: length of the buffer available for response
  104. * @req_desc: pointer to requested descriptor
  105. * @num_elems: pointer to number of elements
  106. * @cd: pointer to command details structure or NULL
  107. *
   108. * Get switch configuration (0x0200) to be placed in 'buf'.
  109. * This admin command returns information such as initial VSI/port number
  110. * and switch ID it belongs to.
  111. *
  112. * NOTE: *req_desc is both an input/output parameter.
   113. * The caller first calls this function with *req_desc set
  114. * to 0. If the response from f/w has *req_desc set to 0, all the switch
  115. * configuration information has been returned; if non-zero (meaning not all
  116. * the information was returned), the caller should call this function again
  117. * with *req_desc set to the previous value returned by f/w to get the
  118. * next block of switch configuration information.
  119. *
   120. * *num_elems is an output-only parameter. It reflects the number of elements
   121. * in the response buffer. The caller should use *num_elems when
   122. * parsing the response buffer.
  123. */
  124. static enum ice_status
  125. ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
  126. u16 buf_size, u16 *req_desc, u16 *num_elems,
  127. struct ice_sq_cd *cd)
  128. {
  129. struct ice_aqc_get_sw_cfg *cmd;
  130. enum ice_status status;
  131. struct ice_aq_desc desc;
  132. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
  133. cmd = &desc.params.get_sw_conf;
  134. cmd->element = cpu_to_le16(*req_desc);
  135. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  136. if (!status) {
  137. *req_desc = le16_to_cpu(cmd->element);
  138. *num_elems = le16_to_cpu(cmd->num_elems);
  139. }
  140. return status;
  141. }
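/* Illustrative sketch (not part of the upstream driver): a minimal paging
 * loop over the req_desc continuation token described in the comment above.
 * ice_get_initial_sw_cfg() later in this file is the real in-driver user of
 * this pattern; the function name here is hypothetical and rbuf is assumed
 * to point to a buffer of at least ICE_SW_CFG_MAX_BUF_LEN bytes.
 */
static enum ice_status
ice_example_walk_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *rbuf)
{
        enum ice_status status;
        u16 req_desc = 0;
        u16 num_elems;

        do {
                status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
                                           &req_desc, &num_elems, NULL);
                if (status)
                        break;
                /* parse num_elems response elements from rbuf here */
        } while (req_desc);

        return status;
}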
  142. /**
  143. * ice_aq_add_vsi
  144. * @hw: pointer to the hw struct
  145. * @vsi_ctx: pointer to a VSI context struct
  146. * @cd: pointer to command details structure or NULL
  147. *
  148. * Add a VSI context to the hardware (0x0210)
  149. */
  150. static enum ice_status
  151. ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  152. struct ice_sq_cd *cd)
  153. {
  154. struct ice_aqc_add_update_free_vsi_resp *res;
  155. struct ice_aqc_add_get_update_free_vsi *cmd;
  156. struct ice_aq_desc desc;
  157. enum ice_status status;
  158. cmd = &desc.params.vsi_cmd;
  159. res = &desc.params.add_update_free_vsi_res;
  160. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
  161. if (!vsi_ctx->alloc_from_pool)
  162. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
  163. ICE_AQ_VSI_IS_VALID);
  164. cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
  165. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  166. status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
  167. sizeof(vsi_ctx->info), cd);
  168. if (!status) {
  169. vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
  170. vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
  171. vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
  172. }
  173. return status;
  174. }
  175. /**
  176. * ice_aq_free_vsi
  177. * @hw: pointer to the hw struct
  178. * @vsi_ctx: pointer to a VSI context struct
  179. * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
  180. * @cd: pointer to command details structure or NULL
  181. *
  182. * Free VSI context info from hardware (0x0213)
  183. */
  184. static enum ice_status
  185. ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  186. bool keep_vsi_alloc, struct ice_sq_cd *cd)
  187. {
  188. struct ice_aqc_add_update_free_vsi_resp *resp;
  189. struct ice_aqc_add_get_update_free_vsi *cmd;
  190. struct ice_aq_desc desc;
  191. enum ice_status status;
  192. cmd = &desc.params.vsi_cmd;
  193. resp = &desc.params.add_update_free_vsi_res;
  194. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
  195. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
  196. if (keep_vsi_alloc)
  197. cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
  198. status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
  199. if (!status) {
  200. vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
  201. vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
  202. }
  203. return status;
  204. }
  205. /**
  206. * ice_aq_update_vsi
  207. * @hw: pointer to the hw struct
  208. * @vsi_ctx: pointer to a VSI context struct
  209. * @cd: pointer to command details structure or NULL
  210. *
  211. * Update VSI context in the hardware (0x0211)
  212. */
  213. enum ice_status
  214. ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  215. struct ice_sq_cd *cd)
  216. {
  217. struct ice_aqc_add_update_free_vsi_resp *resp;
  218. struct ice_aqc_add_get_update_free_vsi *cmd;
  219. struct ice_aq_desc desc;
  220. enum ice_status status;
  221. cmd = &desc.params.vsi_cmd;
  222. resp = &desc.params.add_update_free_vsi_res;
  223. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
  224. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
  225. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  226. status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
  227. sizeof(vsi_ctx->info), cd);
  228. if (!status) {
  229. vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
  230. vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
  231. }
  232. return status;
  233. }
  234. /**
  235. * ice_update_fltr_vsi_map - update given filter VSI map
   236. * @list_head: list of filters which need to be updated
   237. * @list_lock: lock protecting the filter list
  238. * @old_vsi_num: old VSI HW id
  239. * @new_vsi_num: new VSI HW id
  240. *
  241. * update the VSI map for a given filter list
  242. */
  243. static void
  244. ice_update_fltr_vsi_map(struct list_head *list_head,
  245. struct mutex *list_lock, u16 old_vsi_num,
  246. u16 new_vsi_num)
  247. {
  248. struct ice_fltr_mgmt_list_entry *itr;
  249. mutex_lock(list_lock);
  250. if (list_empty(list_head))
  251. goto exit_update_map;
  252. list_for_each_entry(itr, list_head, list_entry) {
  253. if (itr->vsi_list_info &&
  254. test_bit(old_vsi_num, itr->vsi_list_info->vsi_map)) {
  255. clear_bit(old_vsi_num, itr->vsi_list_info->vsi_map);
  256. set_bit(new_vsi_num, itr->vsi_list_info->vsi_map);
  257. } else if (itr->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
  258. itr->fltr_info.fwd_id.vsi_id == old_vsi_num) {
  259. itr->fltr_info.fwd_id.vsi_id = new_vsi_num;
  260. itr->fltr_info.src = new_vsi_num;
  261. }
  262. }
  263. exit_update_map:
  264. mutex_unlock(list_lock);
  265. }
  266. /**
  267. * ice_update_all_fltr_vsi_map - update all filters VSI map
  268. * @hw: pointer to the hardware structure
  269. * @old_vsi_num: old VSI HW id
  270. * @new_vsi_num: new VSI HW id
  271. *
  272. * update all filters VSI map
  273. */
  274. static void
  275. ice_update_all_fltr_vsi_map(struct ice_hw *hw, u16 old_vsi_num, u16 new_vsi_num)
  276. {
  277. struct ice_switch_info *sw = hw->switch_info;
  278. u8 i;
  279. for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
  280. struct list_head *head = &sw->recp_list[i].filt_rules;
  281. struct mutex *lock; /* Lock to protect filter rule list */
  282. lock = &sw->recp_list[i].filt_rule_lock;
  283. ice_update_fltr_vsi_map(head, lock, old_vsi_num,
  284. new_vsi_num);
  285. }
  286. }
  287. /**
  288. * ice_is_vsi_valid - check whether the VSI is valid or not
  289. * @hw: pointer to the hw struct
  290. * @vsi_handle: VSI handle
  291. *
  292. * check whether the VSI is valid or not
  293. */
  294. static bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
  295. {
  296. return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
  297. }
  298. /**
  299. * ice_get_hw_vsi_num - return the hw VSI number
  300. * @hw: pointer to the hw struct
  301. * @vsi_handle: VSI handle
  302. *
  303. * return the hw VSI number
  304. * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
  305. */
  306. static u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
  307. {
  308. return hw->vsi_ctx[vsi_handle]->vsi_num;
  309. }
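/* Illustrative sketch (not part of the upstream driver): per the caution
 * above, ice_get_hw_vsi_num() must only be called for a handle that passed
 * ice_is_vsi_valid(). A hypothetical wrapper showing that calling contract:
 */
static bool
ice_example_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle, u16 *vsi_num)
{
        if (!ice_is_vsi_valid(hw, vsi_handle))
                return false;
        *vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
        return true;
}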
  310. /**
  311. * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
  312. * @hw: pointer to the hw struct
  313. * @vsi_handle: VSI handle
  314. *
  315. * return the VSI context entry for a given VSI handle
  316. */
  317. static struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
  318. {
  319. return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
  320. }
  321. /**
  322. * ice_save_vsi_ctx - save the VSI context for a given VSI handle
  323. * @hw: pointer to the hw struct
  324. * @vsi_handle: VSI handle
  325. * @vsi: VSI context pointer
  326. *
  327. * save the VSI context entry for a given VSI handle
  328. */
  329. static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
  330. struct ice_vsi_ctx *vsi)
  331. {
  332. hw->vsi_ctx[vsi_handle] = vsi;
  333. }
  334. /**
  335. * ice_clear_vsi_ctx - clear the VSI context entry
  336. * @hw: pointer to the hw struct
  337. * @vsi_handle: VSI handle
  338. *
  339. * clear the VSI context entry
  340. */
  341. static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
  342. {
  343. struct ice_vsi_ctx *vsi;
  344. vsi = ice_get_vsi_ctx(hw, vsi_handle);
  345. if (vsi) {
  346. devm_kfree(ice_hw_to_dev(hw), vsi);
  347. hw->vsi_ctx[vsi_handle] = NULL;
  348. }
  349. }
  350. /**
  351. * ice_add_vsi - add VSI context to the hardware and VSI handle list
  352. * @hw: pointer to the hw struct
  353. * @vsi_handle: unique VSI handle provided by drivers
  354. * @vsi_ctx: pointer to a VSI context struct
  355. * @cd: pointer to command details structure or NULL
  356. *
   357. * Add a VSI context to the hardware and also add it to the VSI handle list.
   358. * If this function gets called after reset for an existing VSI, then update
   359. * the corresponding VSI handle list entry with the new HW VSI number.
  360. */
  361. enum ice_status
  362. ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
  363. struct ice_sq_cd *cd)
  364. {
  365. struct ice_vsi_ctx *tmp_vsi_ctx;
  366. enum ice_status status;
  367. if (vsi_handle >= ICE_MAX_VSI)
  368. return ICE_ERR_PARAM;
  369. status = ice_aq_add_vsi(hw, vsi_ctx, cd);
  370. if (status)
  371. return status;
  372. tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
  373. if (!tmp_vsi_ctx) {
  374. /* Create a new vsi context */
  375. tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
  376. sizeof(*tmp_vsi_ctx), GFP_KERNEL);
  377. if (!tmp_vsi_ctx) {
  378. ice_aq_free_vsi(hw, vsi_ctx, false, cd);
  379. return ICE_ERR_NO_MEMORY;
  380. }
  381. *tmp_vsi_ctx = *vsi_ctx;
  382. ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
  383. } else {
  384. /* update with new HW VSI num */
  385. if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num) {
  386. /* update all filter lists with new HW VSI num */
  387. ice_update_all_fltr_vsi_map(hw, tmp_vsi_ctx->vsi_num,
  388. vsi_ctx->vsi_num);
  389. tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
  390. }
  391. }
  392. return status;
  393. }
  394. /**
   395. * ice_free_vsi - free VSI context from hardware and VSI handle list
  396. * @hw: pointer to the hw struct
  397. * @vsi_handle: unique VSI handle
  398. * @vsi_ctx: pointer to a VSI context struct
  399. * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
  400. * @cd: pointer to command details structure or NULL
  401. *
  402. * Free VSI context info from hardware as well as from VSI handle list
  403. */
  404. enum ice_status
  405. ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
  406. bool keep_vsi_alloc, struct ice_sq_cd *cd)
  407. {
  408. enum ice_status status;
  409. if (!ice_is_vsi_valid(hw, vsi_handle))
  410. return ICE_ERR_PARAM;
  411. vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
  412. status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
  413. if (!status)
  414. ice_clear_vsi_ctx(hw, vsi_handle);
  415. return status;
  416. }
  417. /**
  418. * ice_aq_alloc_free_vsi_list
  419. * @hw: pointer to the hw struct
  420. * @vsi_list_id: VSI list id returned or used for lookup
  421. * @lkup_type: switch rule filter lookup type
  422. * @opc: switch rules population command type - pass in the command opcode
  423. *
   424. * Allocate or free a VSI list resource
  425. */
  426. static enum ice_status
  427. ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
  428. enum ice_sw_lkup_type lkup_type,
  429. enum ice_adminq_opc opc)
  430. {
  431. struct ice_aqc_alloc_free_res_elem *sw_buf;
  432. struct ice_aqc_res_elem *vsi_ele;
  433. enum ice_status status;
  434. u16 buf_len;
  435. buf_len = sizeof(*sw_buf);
  436. sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
  437. if (!sw_buf)
  438. return ICE_ERR_NO_MEMORY;
  439. sw_buf->num_elems = cpu_to_le16(1);
  440. if (lkup_type == ICE_SW_LKUP_MAC ||
  441. lkup_type == ICE_SW_LKUP_MAC_VLAN ||
  442. lkup_type == ICE_SW_LKUP_ETHERTYPE ||
  443. lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
  444. lkup_type == ICE_SW_LKUP_PROMISC ||
  445. lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
  446. sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
  447. } else if (lkup_type == ICE_SW_LKUP_VLAN) {
  448. sw_buf->res_type =
  449. cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
  450. } else {
  451. status = ICE_ERR_PARAM;
  452. goto ice_aq_alloc_free_vsi_list_exit;
  453. }
  454. if (opc == ice_aqc_opc_free_res)
  455. sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
  456. status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
  457. if (status)
  458. goto ice_aq_alloc_free_vsi_list_exit;
  459. if (opc == ice_aqc_opc_alloc_res) {
  460. vsi_ele = &sw_buf->elem[0];
  461. *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
  462. }
  463. ice_aq_alloc_free_vsi_list_exit:
  464. devm_kfree(ice_hw_to_dev(hw), sw_buf);
  465. return status;
  466. }
  467. /**
  468. * ice_aq_sw_rules - add/update/remove switch rules
  469. * @hw: pointer to the hw struct
  470. * @rule_list: pointer to switch rule population list
  471. * @rule_list_sz: total size of the rule list in bytes
  472. * @num_rules: number of switch rules in the rule_list
  473. * @opc: switch rules population command type - pass in the command opcode
  474. * @cd: pointer to command details structure or NULL
  475. *
  476. * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
  477. */
  478. static enum ice_status
  479. ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
  480. u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  481. {
  482. struct ice_aq_desc desc;
  483. if (opc != ice_aqc_opc_add_sw_rules &&
  484. opc != ice_aqc_opc_update_sw_rules &&
  485. opc != ice_aqc_opc_remove_sw_rules)
  486. return ICE_ERR_PARAM;
  487. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  488. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  489. desc.params.sw_rules.num_rules_fltr_entry_index =
  490. cpu_to_le16(num_rules);
  491. return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
  492. }
  493. /* ice_init_port_info - Initialize port_info with switch configuration data
  494. * @pi: pointer to port_info
  495. * @vsi_port_num: VSI number or port number
  496. * @type: Type of switch element (port or VSI)
  497. * @swid: switch ID of the switch the element is attached to
  498. * @pf_vf_num: PF or VF number
  499. * @is_vf: true if the element is a VF, false otherwise
  500. */
  501. static void
  502. ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
  503. u16 swid, u16 pf_vf_num, bool is_vf)
  504. {
  505. switch (type) {
  506. case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
  507. pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
  508. pi->sw_id = swid;
  509. pi->pf_vf_num = pf_vf_num;
  510. pi->is_vf = is_vf;
  511. pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
  512. pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
  513. break;
  514. default:
  515. ice_debug(pi->hw, ICE_DBG_SW,
  516. "incorrect VSI/port type received\n");
  517. break;
  518. }
  519. }
  520. /* ice_get_initial_sw_cfg - Get initial port and default VSI data
  521. * @hw: pointer to the hardware structure
  522. */
  523. enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
  524. {
  525. struct ice_aqc_get_sw_cfg_resp *rbuf;
  526. enum ice_status status;
  527. u16 req_desc = 0;
  528. u16 num_elems;
  529. u16 i;
  530. rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
  531. GFP_KERNEL);
  532. if (!rbuf)
  533. return ICE_ERR_NO_MEMORY;
  534. /* Multiple calls to ice_aq_get_sw_cfg may be required
  535. * to get all the switch configuration information. The need
  536. * for additional calls is indicated by ice_aq_get_sw_cfg
  537. * writing a non-zero value in req_desc
  538. */
  539. do {
  540. status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
  541. &req_desc, &num_elems, NULL);
  542. if (status)
  543. break;
  544. for (i = 0; i < num_elems; i++) {
  545. struct ice_aqc_get_sw_cfg_resp_elem *ele;
  546. u16 pf_vf_num, swid, vsi_port_num;
  547. bool is_vf = false;
  548. u8 type;
  549. ele = rbuf[i].elements;
  550. vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
  551. ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
  552. pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
  553. ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
  554. swid = le16_to_cpu(ele->swid);
  555. if (le16_to_cpu(ele->pf_vf_num) &
  556. ICE_AQC_GET_SW_CONF_RESP_IS_VF)
  557. is_vf = true;
  558. type = le16_to_cpu(ele->vsi_port_num) >>
  559. ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
  560. if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
  561. /* FW VSI is not needed. Just continue. */
  562. continue;
  563. }
  564. ice_init_port_info(hw->port_info, vsi_port_num,
  565. type, swid, pf_vf_num, is_vf);
  566. }
  567. } while (req_desc && !status);
  568. devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
  569. return status;
  570. }
  571. /**
  572. * ice_fill_sw_info - Helper function to populate lb_en and lan_en
  573. * @hw: pointer to the hardware structure
  574. * @f_info: filter info structure to fill/update
  575. *
  576. * This helper function populates the lb_en and lan_en elements of the provided
  577. * ice_fltr_info struct using the switch's type and characteristics of the
  578. * switch rule being configured.
  579. */
  580. static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
  581. {
  582. f_info->lb_en = false;
  583. f_info->lan_en = false;
  584. if ((f_info->flag & ICE_FLTR_TX) &&
  585. (f_info->fltr_act == ICE_FWD_TO_VSI ||
  586. f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
  587. f_info->fltr_act == ICE_FWD_TO_Q ||
  588. f_info->fltr_act == ICE_FWD_TO_QGRP)) {
  589. f_info->lb_en = true;
  590. if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
  591. is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
  592. f_info->lan_en = true;
  593. }
  594. }
  595. /**
  596. * ice_fill_sw_rule - Helper function to fill switch rule structure
  597. * @hw: pointer to the hardware structure
  598. * @f_info: entry containing packet forwarding information
   599. * @s_rule: switch rule structure to be filled in based on f_info
  600. * @opc: switch rules population command type - pass in the command opcode
  601. */
  602. static void
  603. ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
  604. struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
  605. {
  606. u16 vlan_id = ICE_MAX_VLAN_ID + 1;
  607. void *daddr = NULL;
  608. u16 eth_hdr_sz;
  609. u8 *eth_hdr;
  610. u32 act = 0;
  611. __be16 *off;
  612. if (opc == ice_aqc_opc_remove_sw_rules) {
  613. s_rule->pdata.lkup_tx_rx.act = 0;
  614. s_rule->pdata.lkup_tx_rx.index =
  615. cpu_to_le16(f_info->fltr_rule_id);
  616. s_rule->pdata.lkup_tx_rx.hdr_len = 0;
  617. return;
  618. }
  619. eth_hdr_sz = sizeof(dummy_eth_header);
  620. eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
  621. /* initialize the ether header with a dummy header */
  622. memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
  623. ice_fill_sw_info(hw, f_info);
  624. switch (f_info->fltr_act) {
  625. case ICE_FWD_TO_VSI:
  626. act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
  627. ICE_SINGLE_ACT_VSI_ID_M;
  628. if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
  629. act |= ICE_SINGLE_ACT_VSI_FORWARDING |
  630. ICE_SINGLE_ACT_VALID_BIT;
  631. break;
  632. case ICE_FWD_TO_VSI_LIST:
  633. act |= ICE_SINGLE_ACT_VSI_LIST;
  634. act |= (f_info->fwd_id.vsi_list_id <<
  635. ICE_SINGLE_ACT_VSI_LIST_ID_S) &
  636. ICE_SINGLE_ACT_VSI_LIST_ID_M;
  637. if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
  638. act |= ICE_SINGLE_ACT_VSI_FORWARDING |
  639. ICE_SINGLE_ACT_VALID_BIT;
  640. break;
  641. case ICE_FWD_TO_Q:
  642. act |= ICE_SINGLE_ACT_TO_Q;
  643. act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
  644. ICE_SINGLE_ACT_Q_INDEX_M;
  645. break;
  646. case ICE_FWD_TO_QGRP:
  647. act |= ICE_SINGLE_ACT_TO_Q;
  648. act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
  649. ICE_SINGLE_ACT_Q_REGION_M;
  650. break;
  651. case ICE_DROP_PACKET:
  652. act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
  653. break;
  654. default:
  655. return;
  656. }
  657. if (f_info->lb_en)
  658. act |= ICE_SINGLE_ACT_LB_ENABLE;
  659. if (f_info->lan_en)
  660. act |= ICE_SINGLE_ACT_LAN_ENABLE;
  661. switch (f_info->lkup_type) {
  662. case ICE_SW_LKUP_MAC:
  663. daddr = f_info->l_data.mac.mac_addr;
  664. break;
  665. case ICE_SW_LKUP_VLAN:
  666. vlan_id = f_info->l_data.vlan.vlan_id;
  667. if (f_info->fltr_act == ICE_FWD_TO_VSI ||
  668. f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
  669. act |= ICE_SINGLE_ACT_PRUNE;
  670. act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
  671. }
  672. break;
  673. case ICE_SW_LKUP_ETHERTYPE_MAC:
  674. daddr = f_info->l_data.ethertype_mac.mac_addr;
  675. /* fall-through */
  676. case ICE_SW_LKUP_ETHERTYPE:
  677. off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
  678. *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
  679. break;
  680. case ICE_SW_LKUP_MAC_VLAN:
  681. daddr = f_info->l_data.mac_vlan.mac_addr;
  682. vlan_id = f_info->l_data.mac_vlan.vlan_id;
  683. break;
  684. case ICE_SW_LKUP_PROMISC_VLAN:
  685. vlan_id = f_info->l_data.mac_vlan.vlan_id;
  686. /* fall-through */
  687. case ICE_SW_LKUP_PROMISC:
  688. daddr = f_info->l_data.mac_vlan.mac_addr;
  689. break;
  690. default:
  691. break;
  692. }
  693. s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
  694. cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
  695. cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
  696. /* Recipe set depending on lookup type */
  697. s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
  698. s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
  699. s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
  700. if (daddr)
  701. ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
  702. if (!(vlan_id > ICE_MAX_VLAN_ID)) {
  703. off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
  704. *off = cpu_to_be16(vlan_id);
  705. }
  706. /* Create the switch rule with the final dummy Ethernet header */
  707. if (opc != ice_aqc_opc_update_sw_rules)
  708. s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
  709. }
  710. /**
  711. * ice_add_marker_act
  712. * @hw: pointer to the hardware structure
  713. * @m_ent: the management entry for which sw marker needs to be added
  714. * @sw_marker: sw marker to tag the Rx descriptor with
  715. * @l_id: large action resource id
  716. *
   717. * Create a large action to hold a software marker and update the switch rule
   718. * entry pointed to by m_ent with the newly created large action
  719. */
  720. static enum ice_status
  721. ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
  722. u16 sw_marker, u16 l_id)
  723. {
  724. struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
  725. /* For software marker we need 3 large actions
  726. * 1. FWD action: FWD TO VSI or VSI LIST
  727. * 2. GENERIC VALUE action to hold the profile id
  728. * 3. GENERIC VALUE action to hold the software marker id
  729. */
  730. const u16 num_lg_acts = 3;
  731. enum ice_status status;
  732. u16 lg_act_size;
  733. u16 rules_size;
  734. u16 vsi_info;
  735. u32 act;
  736. if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
  737. return ICE_ERR_PARAM;
  738. /* Create two back-to-back switch rules and submit them to the HW using
  739. * one memory buffer:
  740. * 1. Large Action
  741. * 2. Look up tx rx
  742. */
  743. lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
  744. rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
  745. lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
  746. if (!lg_act)
  747. return ICE_ERR_NO_MEMORY;
  748. rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
  749. /* Fill in the first switch rule i.e. large action */
  750. lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
  751. lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
  752. lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
  753. /* First action VSI forwarding or VSI list forwarding depending on how
  754. * many VSIs
  755. */
  756. vsi_info = (m_ent->vsi_count > 1) ?
  757. m_ent->fltr_info.fwd_id.vsi_list_id :
  758. m_ent->fltr_info.fwd_id.vsi_id;
  759. act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
  760. act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) &
  761. ICE_LG_ACT_VSI_LIST_ID_M;
  762. if (m_ent->vsi_count > 1)
  763. act |= ICE_LG_ACT_VSI_LIST;
  764. lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
  765. /* Second action descriptor type */
  766. act = ICE_LG_ACT_GENERIC;
  767. act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
  768. lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
  769. act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
  770. ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
  771. /* Third action Marker value */
  772. act |= ICE_LG_ACT_GENERIC;
  773. act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
  774. ICE_LG_ACT_GENERIC_VALUE_M;
  775. lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
  776. /* call the fill switch rule to fill the lookup tx rx structure */
  777. ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
  778. ice_aqc_opc_update_sw_rules);
  779. /* Update the action to point to the large action id */
  780. rx_tx->pdata.lkup_tx_rx.act =
  781. cpu_to_le32(ICE_SINGLE_ACT_PTR |
  782. ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
  783. ICE_SINGLE_ACT_PTR_VAL_M));
   784. /* Use the filter rule id of the previously created rule with a single
   785. * act. Once the update happens, hardware will treat this as a large
   786. * action.
   787. */
  788. rx_tx->pdata.lkup_tx_rx.index =
  789. cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
  790. status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
  791. ice_aqc_opc_update_sw_rules, NULL);
  792. if (!status) {
  793. m_ent->lg_act_idx = l_id;
  794. m_ent->sw_marker_id = sw_marker;
  795. }
  796. devm_kfree(ice_hw_to_dev(hw), lg_act);
  797. return status;
  798. }
  799. /**
  800. * ice_create_vsi_list_map
  801. * @hw: pointer to the hardware structure
  802. * @vsi_array: array of VSIs to form a VSI list
  803. * @num_vsi: num VSI in the array
  804. * @vsi_list_id: VSI list id generated as part of allocate resource
  805. *
  806. * Helper function to create a new entry of VSI list id to VSI mapping
  807. * using the given VSI list id
  808. */
  809. static struct ice_vsi_list_map_info *
  810. ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
  811. u16 vsi_list_id)
  812. {
  813. struct ice_switch_info *sw = hw->switch_info;
  814. struct ice_vsi_list_map_info *v_map;
  815. int i;
  816. v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
  817. if (!v_map)
  818. return NULL;
  819. v_map->vsi_list_id = vsi_list_id;
  820. for (i = 0; i < num_vsi; i++)
  821. set_bit(vsi_array[i], v_map->vsi_map);
  822. list_add(&v_map->list_entry, &sw->vsi_list_map_head);
  823. return v_map;
  824. }
  825. /**
  826. * ice_update_vsi_list_rule
  827. * @hw: pointer to the hardware structure
  828. * @vsi_array: array of VSIs to form a VSI list
  829. * @num_vsi: num VSI in the array
  830. * @vsi_list_id: VSI list id generated as part of allocate resource
  831. * @remove: Boolean value to indicate if this is a remove action
  832. * @opc: switch rules population command type - pass in the command opcode
  833. * @lkup_type: lookup type of the filter
  834. *
  835. * Call AQ command to add a new switch rule or update existing switch rule
  836. * using the given VSI list id
  837. */
  838. static enum ice_status
  839. ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
  840. u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
  841. enum ice_sw_lkup_type lkup_type)
  842. {
  843. struct ice_aqc_sw_rules_elem *s_rule;
  844. enum ice_status status;
  845. u16 s_rule_size;
  846. u16 type;
  847. int i;
  848. if (!num_vsi)
  849. return ICE_ERR_PARAM;
  850. if (lkup_type == ICE_SW_LKUP_MAC ||
  851. lkup_type == ICE_SW_LKUP_MAC_VLAN ||
  852. lkup_type == ICE_SW_LKUP_ETHERTYPE ||
  853. lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
  854. lkup_type == ICE_SW_LKUP_PROMISC ||
  855. lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
  856. type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
  857. ICE_AQC_SW_RULES_T_VSI_LIST_SET;
  858. else if (lkup_type == ICE_SW_LKUP_VLAN)
  859. type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
  860. ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
  861. else
  862. return ICE_ERR_PARAM;
  863. s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
  864. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  865. if (!s_rule)
  866. return ICE_ERR_NO_MEMORY;
  867. for (i = 0; i < num_vsi; i++)
  868. s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]);
  869. s_rule->type = cpu_to_le16(type);
  870. s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
  871. s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
  872. status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
  873. devm_kfree(ice_hw_to_dev(hw), s_rule);
  874. return status;
  875. }
  876. /**
  877. * ice_create_vsi_list_rule - Creates and populates a VSI list rule
  878. * @hw: pointer to the hw struct
  879. * @vsi_array: array of VSIs to form a VSI list
  880. * @num_vsi: number of VSIs in the array
  881. * @vsi_list_id: stores the ID of the VSI list to be created
  882. * @lkup_type: switch rule filter's lookup type
  883. */
  884. static enum ice_status
  885. ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
  886. u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
  887. {
  888. enum ice_status status;
  889. int i;
  890. for (i = 0; i < num_vsi; i++)
  891. if (vsi_array[i] >= ICE_MAX_VSI)
  892. return ICE_ERR_OUT_OF_RANGE;
  893. status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
  894. ice_aqc_opc_alloc_res);
  895. if (status)
  896. return status;
  897. /* Update the newly created VSI list to include the specified VSIs */
  898. return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id,
  899. false, ice_aqc_opc_add_sw_rules,
  900. lkup_type);
  901. }
  902. /**
  903. * ice_create_pkt_fwd_rule
  904. * @hw: pointer to the hardware structure
  905. * @f_entry: entry containing packet forwarding information
  906. *
  907. * Create switch rule with given filter information and add an entry
  908. * to the corresponding filter management list to track this switch rule
  909. * and VSI mapping
  910. */
  911. static enum ice_status
  912. ice_create_pkt_fwd_rule(struct ice_hw *hw,
  913. struct ice_fltr_list_entry *f_entry)
  914. {
  915. struct ice_fltr_mgmt_list_entry *fm_entry;
  916. struct ice_aqc_sw_rules_elem *s_rule;
  917. enum ice_sw_lkup_type l_type;
  918. struct ice_sw_recipe *recp;
  919. enum ice_status status;
  920. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  921. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
  922. if (!s_rule)
  923. return ICE_ERR_NO_MEMORY;
  924. fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
  925. GFP_KERNEL);
  926. if (!fm_entry) {
  927. status = ICE_ERR_NO_MEMORY;
  928. goto ice_create_pkt_fwd_rule_exit;
  929. }
  930. fm_entry->fltr_info = f_entry->fltr_info;
  931. /* Initialize all the fields for the management entry */
  932. fm_entry->vsi_count = 1;
  933. fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
  934. fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
  935. fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
  936. ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
  937. ice_aqc_opc_add_sw_rules);
  938. status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
  939. ice_aqc_opc_add_sw_rules, NULL);
  940. if (status) {
  941. devm_kfree(ice_hw_to_dev(hw), fm_entry);
  942. goto ice_create_pkt_fwd_rule_exit;
  943. }
  944. f_entry->fltr_info.fltr_rule_id =
  945. le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  946. fm_entry->fltr_info.fltr_rule_id =
  947. le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  948. /* The book keeping entries will get removed when base driver
  949. * calls remove filter AQ command
  950. */
  951. l_type = fm_entry->fltr_info.lkup_type;
  952. recp = &hw->switch_info->recp_list[l_type];
  953. list_add(&fm_entry->list_entry, &recp->filt_rules);
  954. ice_create_pkt_fwd_rule_exit:
  955. devm_kfree(ice_hw_to_dev(hw), s_rule);
  956. return status;
  957. }
  958. /**
  959. * ice_update_pkt_fwd_rule
  960. * @hw: pointer to the hardware structure
  961. * @f_info: filter information for switch rule
  962. *
  963. * Call AQ command to update a previously created switch rule with a
  964. * VSI list id
  965. */
  966. static enum ice_status
  967. ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
  968. {
  969. struct ice_aqc_sw_rules_elem *s_rule;
  970. enum ice_status status;
  971. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  972. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
  973. if (!s_rule)
  974. return ICE_ERR_NO_MEMORY;
  975. ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
  976. s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
  977. /* Update switch rule with new rule set to forward VSI list */
  978. status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
  979. ice_aqc_opc_update_sw_rules, NULL);
  980. devm_kfree(ice_hw_to_dev(hw), s_rule);
  981. return status;
  982. }
  983. /**
  984. * ice_update_sw_rule_bridge_mode
  985. * @hw: pointer to the hw struct
  986. *
  987. * Updates unicast switch filter rules based on VEB/VEPA mode
  988. */
  989. enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
  990. {
  991. struct ice_switch_info *sw = hw->switch_info;
  992. struct ice_fltr_mgmt_list_entry *fm_entry;
  993. enum ice_status status = 0;
  994. struct list_head *rule_head;
  995. struct mutex *rule_lock; /* Lock to protect filter rule list */
  996. rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
  997. rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
  998. mutex_lock(rule_lock);
  999. list_for_each_entry(fm_entry, rule_head, list_entry) {
  1000. struct ice_fltr_info *fi = &fm_entry->fltr_info;
  1001. u8 *addr = fi->l_data.mac.mac_addr;
  1002. /* Update unicast Tx rules to reflect the selected
  1003. * VEB/VEPA mode
  1004. */
  1005. if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
  1006. (fi->fltr_act == ICE_FWD_TO_VSI ||
  1007. fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
  1008. fi->fltr_act == ICE_FWD_TO_Q ||
  1009. fi->fltr_act == ICE_FWD_TO_QGRP)) {
  1010. status = ice_update_pkt_fwd_rule(hw, fi);
  1011. if (status)
  1012. break;
  1013. }
  1014. }
  1015. mutex_unlock(rule_lock);
  1016. return status;
  1017. }
  1018. /**
  1019. * ice_add_update_vsi_list
  1020. * @hw: pointer to the hardware structure
  1021. * @m_entry: pointer to current filter management list entry
  1022. * @cur_fltr: filter information from the book keeping entry
  1023. * @new_fltr: filter information with the new VSI to be added
  1024. *
  1025. * Call AQ command to add or update previously created VSI list with new VSI.
  1026. *
   1027. * Helper function to do book keeping associated with adding filter information.
   1028. * The algorithm to do the book keeping is described below:
   1029. * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
   1030. *   if only one VSI has been added till now
   1031. *     Allocate a new VSI list and add two VSIs
   1032. *     to this list using switch rule command
   1033. *     Update the previously created switch rule with the
   1034. *     newly created VSI list ID
   1035. *   if a VSI list was previously created
   1036. *     Add the new VSI to the previously created VSI list set
   1037. *     using the update switch rule command
  1038. */
  1039. static enum ice_status
  1040. ice_add_update_vsi_list(struct ice_hw *hw,
  1041. struct ice_fltr_mgmt_list_entry *m_entry,
  1042. struct ice_fltr_info *cur_fltr,
  1043. struct ice_fltr_info *new_fltr)
  1044. {
  1045. enum ice_status status = 0;
  1046. u16 vsi_list_id = 0;
  1047. if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
  1048. cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
  1049. return ICE_ERR_NOT_IMPL;
  1050. if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
  1051. new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
  1052. (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
  1053. cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
  1054. return ICE_ERR_NOT_IMPL;
  1055. if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
  1056. /* Only one entry existed in the mapping and it was not already
  1057. * a part of a VSI list. So, create a VSI list with the old and
  1058. * new VSIs.
  1059. */
  1060. struct ice_fltr_info tmp_fltr;
  1061. u16 vsi_id_arr[2];
  1062. /* A rule already exists with the new VSI being added */
  1063. if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id)
  1064. return ICE_ERR_ALREADY_EXISTS;
  1065. vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id;
  1066. vsi_id_arr[1] = new_fltr->fwd_id.vsi_id;
  1067. status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2,
  1068. &vsi_list_id,
  1069. new_fltr->lkup_type);
  1070. if (status)
  1071. return status;
  1072. tmp_fltr = *new_fltr;
  1073. tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
  1074. tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
  1075. tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
  1076. /* Update the previous switch rule of "MAC forward to VSI" to
  1077. * "MAC fwd to VSI list"
  1078. */
  1079. status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
  1080. if (status)
  1081. return status;
  1082. cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
  1083. cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
  1084. m_entry->vsi_list_info =
  1085. ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2,
  1086. vsi_list_id);
  1087. /* If this entry was large action then the large action needs
  1088. * to be updated to point to FWD to VSI list
  1089. */
  1090. if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
  1091. status =
  1092. ice_add_marker_act(hw, m_entry,
  1093. m_entry->sw_marker_id,
  1094. m_entry->lg_act_idx);
  1095. } else {
  1096. u16 vsi_id = new_fltr->fwd_id.vsi_id;
  1097. enum ice_adminq_opc opcode;
  1098. /* A rule already exists with the new VSI being added */
  1099. if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map))
  1100. return 0;
  1101. /* Update the previously created VSI list set with
  1102. * the new VSI id passed in
  1103. */
  1104. vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
  1105. opcode = ice_aqc_opc_update_sw_rules;
  1106. status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
  1107. false, opcode,
  1108. new_fltr->lkup_type);
  1109. /* update VSI list mapping info with new VSI id */
  1110. if (!status)
  1111. set_bit(vsi_id, m_entry->vsi_list_info->vsi_map);
  1112. }
  1113. if (!status)
  1114. m_entry->vsi_count++;
  1115. return status;
  1116. }
  1117. /**
  1118. * ice_find_rule_entry - Search a rule entry
  1119. * @hw: pointer to the hardware structure
  1120. * @recp_id: lookup type for which the specified rule needs to be searched
  1121. * @f_info: rule information
  1122. *
  1123. * Helper function to search for a given rule entry
  1124. * Returns pointer to entry storing the rule if found
  1125. */
  1126. static struct ice_fltr_mgmt_list_entry *
  1127. ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
  1128. {
  1129. struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
  1130. struct ice_switch_info *sw = hw->switch_info;
  1131. struct list_head *list_head;
  1132. list_head = &sw->recp_list[recp_id].filt_rules;
  1133. list_for_each_entry(list_itr, list_head, list_entry) {
  1134. if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
  1135. sizeof(f_info->l_data)) &&
  1136. f_info->flag == list_itr->fltr_info.flag) {
  1137. ret = list_itr;
  1138. break;
  1139. }
  1140. }
  1141. return ret;
  1142. }
  1143. /**
  1144. * ice_add_rule_internal - add rule for a given lookup type
  1145. * @hw: pointer to the hardware structure
  1146. * @recp_id: lookup type (recipe id) for which rule has to be added
  1147. * @f_entry: structure containing MAC forwarding information
  1148. *
  1149. * Adds or updates the rule lists for a given recipe
  1150. */
  1151. static enum ice_status
  1152. ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
  1153. struct ice_fltr_list_entry *f_entry)
  1154. {
  1155. struct ice_switch_info *sw = hw->switch_info;
  1156. struct ice_fltr_info *new_fltr, *cur_fltr;
  1157. struct ice_fltr_mgmt_list_entry *m_entry;
  1158. struct mutex *rule_lock; /* Lock to protect filter rule list */
  1159. enum ice_status status = 0;
  1160. rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
  1161. mutex_lock(rule_lock);
  1162. new_fltr = &f_entry->fltr_info;
  1163. if (new_fltr->flag & ICE_FLTR_RX)
  1164. new_fltr->src = hw->port_info->lport;
  1165. else if (new_fltr->flag & ICE_FLTR_TX)
  1166. new_fltr->src = f_entry->fltr_info.fwd_id.vsi_id;
  1167. m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
  1168. if (!m_entry) {
  1169. mutex_unlock(rule_lock);
  1170. return ice_create_pkt_fwd_rule(hw, f_entry);
  1171. }
  1172. cur_fltr = &m_entry->fltr_info;
  1173. status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
  1174. mutex_unlock(rule_lock);
  1175. return status;
  1176. }
  1177. /**
  1178. * ice_remove_vsi_list_rule
  1179. * @hw: pointer to the hardware structure
  1180. * @vsi_list_id: VSI list id generated as part of allocate resource
  1181. * @lkup_type: switch rule filter lookup type
  1182. *
  1183. * The VSI list should be emptied before this function is called to remove the
  1184. * VSI list.
  1185. */
  1186. static enum ice_status
  1187. ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
  1188. enum ice_sw_lkup_type lkup_type)
  1189. {
  1190. struct ice_aqc_sw_rules_elem *s_rule;
  1191. enum ice_status status;
  1192. u16 s_rule_size;
  1193. s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
  1194. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  1195. if (!s_rule)
  1196. return ICE_ERR_NO_MEMORY;
  1197. s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
  1198. s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
  1199. /* Free the vsi_list resource that we allocated. It is assumed that the
  1200. * list is empty at this point.
  1201. */
  1202. status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
  1203. ice_aqc_opc_free_res);
  1204. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1205. return status;
  1206. }

/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_id: ID of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_id,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_id, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;

	status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_id, fm_list->vsi_list_info->vsi_map);

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_id;

		rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
					    ICE_MAX_VSI);
		if (rem_vsi_id == ICE_MAX_VSI)
			return ICE_ERR_OUT_OF_RANGE;

		status = ice_update_vsi_list_rule(hw, &rem_vsi_id, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status)
			return status;

		/* Change the list entry action from VSI_LIST to VSI */
		fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
		fm_list->fltr_info.fwd_id.vsi_id = rem_vsi_id;

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe id for which the rule needs to be removed
 * @f_entry: rule entry containing filter information
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	bool remove_rule = false;
	u16 vsi_id;

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else {
		vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
		status = ice_rem_update_vsi_list(hw, vsi_id, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		if (status)
			goto exit;

		/* Remove the bookkeeping entry from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);
		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't check for
 * duplicates in this case; removing duplicates from a given list should be
 * handled by the caller of this function.
 */
enum ice_status
ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 elem_sent, total_elem_left;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u16 s_rule_size;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in chunks that fit within ICE_AQ_MAX_BUF_LEN */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The bookkeeping entries are removed when the base
			 * driver calls the remove filter AQ command.
			 */
			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
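
/*
 * Usage sketch for ice_add_mac() (illustrative only, not called by the
 * driver): the caller builds a list of ice_fltr_list_entry items and passes
 * its head in. "hw", "vsi_id", "status" and the MAC address below are
 * hypothetical caller-side values; error unwinding and list cleanup are
 * omitted for brevity.
 *
 *	LIST_HEAD(m_list);
 *	struct ice_fltr_list_entry *entry;
 *	u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
 *	if (!entry)
 *		return ICE_ERR_NO_MEMORY;
 *	entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry->fltr_info.fwd_id.vsi_id = vsi_id;
 *	ether_addr_copy(entry->fltr_info.l_data.mac.mac_addr, mac);
 *	list_add(&entry->list_entry, &m_list);
 *
 *	status = ice_add_mac(hw, &m_list);
 */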

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	new_fltr = &f_entry->fltr_info;
	/* VLAN id should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		u16 vsi_id = ICE_VSI_INVAL_ID;
		u16 vsi_list_id = 0;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type;

			/* All VLAN pruning rules use a VSI list.
			 * Convert the action to forwarding to a VSI list.
			 */
			vsi_id = new_fltr->fwd_id.vsi_id;
			status = ice_create_vsi_list_rule(hw, &vsi_id, 1,
							  &vsi_list_id,
							  lkup_type);
			if (status)
				goto exit;
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status && vsi_id != ICE_VSI_INVAL_ID) {
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			v_list_itr->vsi_list_info =
				ice_create_vsi_list_map(hw, &vsi_id, 1,
							vsi_list_id);
		}
		goto exit;
	}

	cur_fltr = &v_list_itr->fltr_info;
	status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, new_fltr);

exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}
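
/*
 * Usage sketch for ice_add_vlan() (illustrative only): same pattern as the
 * ice_add_mac() example, but with a VLAN lookup type and the VLAN ID carried
 * in l_data. "hw", "vsi_id", "status" and VLAN ID 100 are hypothetical
 * caller-side values.
 *
 *	LIST_HEAD(v_list);
 *	struct ice_fltr_list_entry *entry;
 *
 *	entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*entry), GFP_KERNEL);
 *	if (!entry)
 *		return ICE_ERR_NO_MEMORY;
 *	entry->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
 *	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry->fltr_info.fwd_id.vsi_id = vsi_id;
 *	entry->fltr_info.l_data.vlan.vlan_id = 100;
 *	list_add(&entry->list_entry, &v_list);
 *
 *	status = ice_add_vlan(hw, &v_list);
 */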

/**
 * ice_rem_sw_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 */
static void
ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	if (!list_empty(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
			list_del(&entry->list_entry);
			devm_kfree(ice_hw_to_dev(hw), entry);
		}
	}
}

/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_id: number of VSI to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;

	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.vsi_id = vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src = vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	if (set) {
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
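
/*
 * Usage sketch for ice_cfg_dflt_vsi() (illustrative only): mark a VSI as the
 * default Rx VSI for the switch, then clear it again later. "hw", "vsi_id"
 * and "status" are hypothetical caller-side values.
 *
 *	status = ice_cfg_dflt_vsi(hw, vsi_id, true, ICE_FLTR_RX);
 *	if (status)
 *		return status;
 *	...
 *	status = ice_cfg_dflt_vsi(hw, vsi_id, false, ICE_FLTR_RX);
 */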

/**
 * ice_remove_mac - remove a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
 */
enum ice_status
ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_fltr_list_entry *list_itr;

	if (!m_list)
		return ICE_ERR_PARAM;

	list_for_each_entry(list_itr, m_list, list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;
		list_itr->status = ice_remove_rule_internal(hw,
							    ICE_SW_LKUP_MAC,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return 0;
}
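
/*
 * Usage sketch for ice_remove_mac() (illustrative only): the list entries
 * mirror those previously passed to ice_add_mac(), keyed by lookup type and
 * MAC address. "hw", "vsi_id", "mac" and "status" are hypothetical
 * caller-side values; a stack entry is acceptable here because the function
 * does not keep a reference to it.
 *
 *	LIST_HEAD(m_list);
 *	struct ice_fltr_list_entry entry;
 *
 *	memset(&entry, 0, sizeof(entry));
 *	entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry.fltr_info.fwd_id.vsi_id = vsi_id;
 *	ether_addr_copy(entry.fltr_info.l_data.mac.mac_addr, mac);
 *	list_add(&entry.list_entry, &m_list);
 *
 *	status = ice_remove_mac(hw, &m_list);
 */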

/**
 * ice_remove_vlan - Remove VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->status = ice_remove_rule_internal(hw,
							      ICE_SW_LKUP_VLAN,
							      v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}
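
/*
 * Usage sketch for ice_remove_vlan() (illustrative only): identical in shape
 * to the ice_remove_mac() example above, except the filter is identified by
 * the VLAN lookup type and VLAN ID instead of a MAC address. "hw", "vsi_id",
 * "v_list" and "status" are hypothetical caller-side values.
 *
 *	entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
 *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry.fltr_info.fwd_id.vsi_id = vsi_id;
 *	entry.fltr_info.l_data.vlan.vlan_id = 100;
 *	list_add(&entry.list_entry, &v_list);
 *
 *	status = ice_remove_vlan(hw, &v_list);
 */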

/**
 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
 * @fm_entry: filter entry to inspect
 * @vsi_id: ID of VSI to compare with filter info
 */
static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_id)
{
	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
		 fm_entry->fltr_info.fwd_id.vsi_id == vsi_id) ||
		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
		 (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map))));
}

/**
 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
 * @hw: pointer to the hardware structure
 * @vsi_id: ID of VSI to remove filters from
 * @vsi_list_head: pointer to the list to add entry to
 * @fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the fltr from, and pass on that information.
 */
static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
			       struct list_head *vsi_list_head,
			       struct ice_fltr_info *fi)
{
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.fwd_id.vsi_id = vsi_id;

	list_add(&tmp->list_entry, vsi_list_head);

	return 0;
}

/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_id: ID of VSI to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with list.
 */
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
			 struct list_head *lkup_list_head,
			 struct list_head *vsi_list_head)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;

	/* check to make sure VSI id is valid and within boundary */
	if (vsi_id >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;

	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
		struct ice_fltr_info *fi;

		fi = &fm_entry->fltr_info;
		if (!ice_vsi_uses_fltr(fm_entry, vsi_id))
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_id,
							vsi_list_head, fi);
		if (status)
			return status;
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_id: ID of VSI to remove filters from
 * @lkup: switch rule filter lookup type
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_id, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		return;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_PROMISC_VLAN:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}

/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_id: ID of VSI to remove filters from
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id)
{
	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN);
}
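
/*
 * Usage sketch for ice_remove_vsi_fltr() (illustrative only): a single call
 * drops every lookup-type filter that still references the VSI, e.g. before
 * the VSI is freed. "hw" and "vsi_id" are hypothetical caller-side values.
 *
 *	ice_remove_vsi_fltr(hw, vsi_id);
 */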

/**
 * ice_replay_fltr - Replay all the filters stored by a specific list head
 * @hw: pointer to the hardware structure
 * @recp_id: Recipe id for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 */
static enum ice_status
ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head l_head;
	enum ice_status status = 0;

	if (list_empty(list_head))
		return status;

	/* Move entries from the given list_head to a temporary l_head so that
	 * they can be replayed. Otherwise, when trying to re-add the same
	 * filter, the function would return an "already exists" error.
	 */
	list_replace_init(list_head, &l_head);

	/* Mark the given list_head empty by reinitializing it so filters
	 * could be added again by *handler
	 */
	list_for_each_entry(itr, &l_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}

		/* Add a filter per VSI separately */
		while (1) {
			u16 vsi;

			vsi = find_first_bit(itr->vsi_list_info->vsi_map,
					     ICE_MAX_VSI);
			if (vsi == ICE_MAX_VSI)
				break;
			clear_bit(vsi, itr->vsi_list_info->vsi_map);
			f_entry.fltr_info.fwd_id.vsi_id = vsi;
			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
			if (recp_id == ICE_SW_LKUP_VLAN)
				status = ice_add_vlan_internal(hw, &f_entry);
			else
				status = ice_add_rule_internal(hw, recp_id,
							       &f_entry);
			if (status)
				goto end;
		}
	}
end:
	/* Clear the filter management list */
	ice_rem_sw_rule_info(hw, &l_head);
	return status;
}

/**
 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
 * @hw: pointer to the hardware structure
 *
 * NOTE: This function does not clean up partially added filters on error.
 * It is up to the caller of the function to issue a reset or fail early.
 */
enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct list_head *head = &sw->recp_list[i].filt_rules;

		status = ice_replay_fltr(hw, i, head);
		if (status)
			return status;
	}
	return status;
}
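
/*
 * Usage sketch for ice_replay_all_fltr() (illustrative only): re-programs the
 * rules held in the bookkeeping lists, e.g. after a reset. Per the NOTE
 * above, the caller is expected to reset or fail early if this returns an
 * error. "hw" and "status" are hypothetical caller-side values.
 *
 *	status = ice_replay_all_fltr(hw);
 *	if (status)
 *		return status;
 */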