ice_switch.c 69 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_switch.h"

/* Byte offsets of fields within the dummy ethernet header below */
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
/* VLAN IDs are 12 bits wide */
#define ICE_MAX_VLAN_ID 0xFFF

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN id
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* Size in bytes of a lookup rule element that carries the dummy header
 * above. The trailing "- 1" subtracts the one-byte placeholder for the
 * flexible packet-header array already counted in
 * sizeof(struct ice_sw_rule_lkup_rx_tx).
 * NOTE(review): assumes hdr is declared as a 1-byte trailing array in
 * ice_sw_rule_lkup_rx_tx — confirm against the admin queue header.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Size of a lookup rule element with no packet header attached */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule element holding n actions */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule element holding n VSI entries */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
  47. /**
  48. * ice_aq_alloc_free_res - command to allocate/free resources
  49. * @hw: pointer to the hw struct
  50. * @num_entries: number of resource entries in buffer
  51. * @buf: Indirect buffer to hold data parameters and response
  52. * @buf_size: size of buffer for indirect commands
  53. * @opc: pass in the command opcode
  54. * @cd: pointer to command details structure or NULL
  55. *
  56. * Helper function to allocate/free resources using the admin queue commands
  57. */
  58. static enum ice_status
  59. ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
  60. struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
  61. enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  62. {
  63. struct ice_aqc_alloc_free_res_cmd *cmd;
  64. struct ice_aq_desc desc;
  65. cmd = &desc.params.sw_res_ctrl;
  66. if (!buf)
  67. return ICE_ERR_PARAM;
  68. if (buf_size < (num_entries * sizeof(buf->elem[0])))
  69. return ICE_ERR_PARAM;
  70. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  71. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  72. cmd->num_entries = cpu_to_le16(num_entries);
  73. return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  74. }
  75. /**
  76. * ice_init_def_sw_recp - initialize the recipe book keeping tables
  77. * @hw: pointer to the hw struct
  78. *
  79. * Allocate memory for the entire recipe table and initialize the structures/
  80. * entries corresponding to basic recipes.
  81. */
  82. enum ice_status
  83. ice_init_def_sw_recp(struct ice_hw *hw)
  84. {
  85. struct ice_sw_recipe *recps;
  86. u8 i;
  87. recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
  88. sizeof(struct ice_sw_recipe), GFP_KERNEL);
  89. if (!recps)
  90. return ICE_ERR_NO_MEMORY;
  91. for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
  92. recps[i].root_rid = i;
  93. INIT_LIST_HEAD(&recps[i].filt_rules);
  94. INIT_LIST_HEAD(&recps[i].filt_replay_rules);
  95. mutex_init(&recps[i].filt_rule_lock);
  96. }
  97. hw->switch_info->recp_list = recps;
  98. return 0;
  99. }
  100. /**
  101. * ice_aq_get_sw_cfg - get switch configuration
  102. * @hw: pointer to the hardware structure
  103. * @buf: pointer to the result buffer
  104. * @buf_size: length of the buffer available for response
  105. * @req_desc: pointer to requested descriptor
  106. * @num_elems: pointer to number of elements
  107. * @cd: pointer to command details structure or NULL
  108. *
  109. * Get switch configuration (0x0200) to be placed in 'buff'.
  110. * This admin command returns information such as initial VSI/port number
  111. * and switch ID it belongs to.
  112. *
  113. * NOTE: *req_desc is both an input/output parameter.
  114. * The caller of this function first calls this function with *request_desc set
  115. * to 0. If the response from f/w has *req_desc set to 0, all the switch
  116. * configuration information has been returned; if non-zero (meaning not all
  117. * the information was returned), the caller should call this function again
  118. * with *req_desc set to the previous value returned by f/w to get the
  119. * next block of switch configuration information.
  120. *
  121. * *num_elems is output only parameter. This reflects the number of elements
  122. * in response buffer. The caller of this function to use *num_elems while
  123. * parsing the response buffer.
  124. */
  125. static enum ice_status
  126. ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
  127. u16 buf_size, u16 *req_desc, u16 *num_elems,
  128. struct ice_sq_cd *cd)
  129. {
  130. struct ice_aqc_get_sw_cfg *cmd;
  131. enum ice_status status;
  132. struct ice_aq_desc desc;
  133. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
  134. cmd = &desc.params.get_sw_conf;
  135. cmd->element = cpu_to_le16(*req_desc);
  136. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  137. if (!status) {
  138. *req_desc = le16_to_cpu(cmd->element);
  139. *num_elems = le16_to_cpu(cmd->num_elems);
  140. }
  141. return status;
  142. }
  143. /**
  144. * ice_aq_add_vsi
  145. * @hw: pointer to the hw struct
  146. * @vsi_ctx: pointer to a VSI context struct
  147. * @cd: pointer to command details structure or NULL
  148. *
  149. * Add a VSI context to the hardware (0x0210)
  150. */
  151. static enum ice_status
  152. ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  153. struct ice_sq_cd *cd)
  154. {
  155. struct ice_aqc_add_update_free_vsi_resp *res;
  156. struct ice_aqc_add_get_update_free_vsi *cmd;
  157. struct ice_aq_desc desc;
  158. enum ice_status status;
  159. cmd = &desc.params.vsi_cmd;
  160. res = &desc.params.add_update_free_vsi_res;
  161. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
  162. if (!vsi_ctx->alloc_from_pool)
  163. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
  164. ICE_AQ_VSI_IS_VALID);
  165. cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
  166. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  167. status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
  168. sizeof(vsi_ctx->info), cd);
  169. if (!status) {
  170. vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
  171. vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
  172. vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
  173. }
  174. return status;
  175. }
  176. /**
  177. * ice_aq_free_vsi
  178. * @hw: pointer to the hw struct
  179. * @vsi_ctx: pointer to a VSI context struct
  180. * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
  181. * @cd: pointer to command details structure or NULL
  182. *
  183. * Free VSI context info from hardware (0x0213)
  184. */
  185. static enum ice_status
  186. ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  187. bool keep_vsi_alloc, struct ice_sq_cd *cd)
  188. {
  189. struct ice_aqc_add_update_free_vsi_resp *resp;
  190. struct ice_aqc_add_get_update_free_vsi *cmd;
  191. struct ice_aq_desc desc;
  192. enum ice_status status;
  193. cmd = &desc.params.vsi_cmd;
  194. resp = &desc.params.add_update_free_vsi_res;
  195. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
  196. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
  197. if (keep_vsi_alloc)
  198. cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
  199. status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
  200. if (!status) {
  201. vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
  202. vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
  203. }
  204. return status;
  205. }
  206. /**
  207. * ice_aq_update_vsi
  208. * @hw: pointer to the hw struct
  209. * @vsi_ctx: pointer to a VSI context struct
  210. * @cd: pointer to command details structure or NULL
  211. *
  212. * Update VSI context in the hardware (0x0211)
  213. */
  214. static enum ice_status
  215. ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  216. struct ice_sq_cd *cd)
  217. {
  218. struct ice_aqc_add_update_free_vsi_resp *resp;
  219. struct ice_aqc_add_get_update_free_vsi *cmd;
  220. struct ice_aq_desc desc;
  221. enum ice_status status;
  222. cmd = &desc.params.vsi_cmd;
  223. resp = &desc.params.add_update_free_vsi_res;
  224. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
  225. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
  226. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  227. status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
  228. sizeof(vsi_ctx->info), cd);
  229. if (!status) {
  230. vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
  231. vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
  232. }
  233. return status;
  234. }
  235. /**
  236. * ice_is_vsi_valid - check whether the VSI is valid or not
  237. * @hw: pointer to the hw struct
  238. * @vsi_handle: VSI handle
  239. *
  240. * check whether the VSI is valid or not
  241. */
  242. bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
  243. {
  244. return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
  245. }
  246. /**
  247. * ice_get_hw_vsi_num - return the hw VSI number
  248. * @hw: pointer to the hw struct
  249. * @vsi_handle: VSI handle
  250. *
  251. * return the hw VSI number
  252. * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
  253. */
  254. u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
  255. {
  256. return hw->vsi_ctx[vsi_handle]->vsi_num;
  257. }
  258. /**
  259. * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
  260. * @hw: pointer to the hw struct
  261. * @vsi_handle: VSI handle
  262. *
  263. * return the VSI context entry for a given VSI handle
  264. */
  265. struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
  266. {
  267. return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
  268. }
  269. /**
  270. * ice_save_vsi_ctx - save the VSI context for a given VSI handle
  271. * @hw: pointer to the hw struct
  272. * @vsi_handle: VSI handle
  273. * @vsi: VSI context pointer
  274. *
  275. * save the VSI context entry for a given VSI handle
  276. */
  277. static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
  278. struct ice_vsi_ctx *vsi)
  279. {
  280. hw->vsi_ctx[vsi_handle] = vsi;
  281. }
  282. /**
  283. * ice_clear_vsi_ctx - clear the VSI context entry
  284. * @hw: pointer to the hw struct
  285. * @vsi_handle: VSI handle
  286. *
  287. * clear the VSI context entry
  288. */
  289. static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
  290. {
  291. struct ice_vsi_ctx *vsi;
  292. vsi = ice_get_vsi_ctx(hw, vsi_handle);
  293. if (vsi) {
  294. devm_kfree(ice_hw_to_dev(hw), vsi);
  295. hw->vsi_ctx[vsi_handle] = NULL;
  296. }
  297. }
  298. /**
  299. * ice_add_vsi - add VSI context to the hardware and VSI handle list
  300. * @hw: pointer to the hw struct
  301. * @vsi_handle: unique VSI handle provided by drivers
  302. * @vsi_ctx: pointer to a VSI context struct
  303. * @cd: pointer to command details structure or NULL
  304. *
  305. * Add a VSI context to the hardware also add it into the VSI handle list.
  306. * If this function gets called after reset for existing VSIs then update
  307. * with the new HW VSI number in the corresponding VSI handle list entry.
  308. */
  309. enum ice_status
  310. ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
  311. struct ice_sq_cd *cd)
  312. {
  313. struct ice_vsi_ctx *tmp_vsi_ctx;
  314. enum ice_status status;
  315. if (vsi_handle >= ICE_MAX_VSI)
  316. return ICE_ERR_PARAM;
  317. status = ice_aq_add_vsi(hw, vsi_ctx, cd);
  318. if (status)
  319. return status;
  320. tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
  321. if (!tmp_vsi_ctx) {
  322. /* Create a new vsi context */
  323. tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
  324. sizeof(*tmp_vsi_ctx), GFP_KERNEL);
  325. if (!tmp_vsi_ctx) {
  326. ice_aq_free_vsi(hw, vsi_ctx, false, cd);
  327. return ICE_ERR_NO_MEMORY;
  328. }
  329. *tmp_vsi_ctx = *vsi_ctx;
  330. ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
  331. } else {
  332. /* update with new HW VSI num */
  333. if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
  334. tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
  335. }
  336. return status;
  337. }
  338. /**
  339. * ice_free_vsi- free VSI context from hardware and VSI handle list
  340. * @hw: pointer to the hw struct
  341. * @vsi_handle: unique VSI handle
  342. * @vsi_ctx: pointer to a VSI context struct
  343. * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
  344. * @cd: pointer to command details structure or NULL
  345. *
  346. * Free VSI context info from hardware as well as from VSI handle list
  347. */
  348. enum ice_status
  349. ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
  350. bool keep_vsi_alloc, struct ice_sq_cd *cd)
  351. {
  352. enum ice_status status;
  353. if (!ice_is_vsi_valid(hw, vsi_handle))
  354. return ICE_ERR_PARAM;
  355. vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
  356. status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
  357. if (!status)
  358. ice_clear_vsi_ctx(hw, vsi_handle);
  359. return status;
  360. }
  361. /**
  362. * ice_update_vsi
  363. * @hw: pointer to the hw struct
  364. * @vsi_handle: unique VSI handle
  365. * @vsi_ctx: pointer to a VSI context struct
  366. * @cd: pointer to command details structure or NULL
  367. *
  368. * Update VSI context in the hardware
  369. */
  370. enum ice_status
  371. ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
  372. struct ice_sq_cd *cd)
  373. {
  374. if (!ice_is_vsi_valid(hw, vsi_handle))
  375. return ICE_ERR_PARAM;
  376. vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
  377. return ice_aq_update_vsi(hw, vsi_ctx, cd);
  378. }
  379. /**
  380. * ice_aq_alloc_free_vsi_list
  381. * @hw: pointer to the hw struct
  382. * @vsi_list_id: VSI list id returned or used for lookup
  383. * @lkup_type: switch rule filter lookup type
  384. * @opc: switch rules population command type - pass in the command opcode
  385. *
  386. * allocates or free a VSI list resource
  387. */
  388. static enum ice_status
  389. ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
  390. enum ice_sw_lkup_type lkup_type,
  391. enum ice_adminq_opc opc)
  392. {
  393. struct ice_aqc_alloc_free_res_elem *sw_buf;
  394. struct ice_aqc_res_elem *vsi_ele;
  395. enum ice_status status;
  396. u16 buf_len;
  397. buf_len = sizeof(*sw_buf);
  398. sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
  399. if (!sw_buf)
  400. return ICE_ERR_NO_MEMORY;
  401. sw_buf->num_elems = cpu_to_le16(1);
  402. if (lkup_type == ICE_SW_LKUP_MAC ||
  403. lkup_type == ICE_SW_LKUP_MAC_VLAN ||
  404. lkup_type == ICE_SW_LKUP_ETHERTYPE ||
  405. lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
  406. lkup_type == ICE_SW_LKUP_PROMISC ||
  407. lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
  408. sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
  409. } else if (lkup_type == ICE_SW_LKUP_VLAN) {
  410. sw_buf->res_type =
  411. cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
  412. } else {
  413. status = ICE_ERR_PARAM;
  414. goto ice_aq_alloc_free_vsi_list_exit;
  415. }
  416. if (opc == ice_aqc_opc_free_res)
  417. sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
  418. status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
  419. if (status)
  420. goto ice_aq_alloc_free_vsi_list_exit;
  421. if (opc == ice_aqc_opc_alloc_res) {
  422. vsi_ele = &sw_buf->elem[0];
  423. *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
  424. }
  425. ice_aq_alloc_free_vsi_list_exit:
  426. devm_kfree(ice_hw_to_dev(hw), sw_buf);
  427. return status;
  428. }
  429. /**
  430. * ice_aq_sw_rules - add/update/remove switch rules
  431. * @hw: pointer to the hw struct
  432. * @rule_list: pointer to switch rule population list
  433. * @rule_list_sz: total size of the rule list in bytes
  434. * @num_rules: number of switch rules in the rule_list
  435. * @opc: switch rules population command type - pass in the command opcode
  436. * @cd: pointer to command details structure or NULL
  437. *
  438. * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
  439. */
  440. static enum ice_status
  441. ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
  442. u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  443. {
  444. struct ice_aq_desc desc;
  445. if (opc != ice_aqc_opc_add_sw_rules &&
  446. opc != ice_aqc_opc_update_sw_rules &&
  447. opc != ice_aqc_opc_remove_sw_rules)
  448. return ICE_ERR_PARAM;
  449. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  450. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  451. desc.params.sw_rules.num_rules_fltr_entry_index =
  452. cpu_to_le16(num_rules);
  453. return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
  454. }
  455. /* ice_init_port_info - Initialize port_info with switch configuration data
  456. * @pi: pointer to port_info
  457. * @vsi_port_num: VSI number or port number
  458. * @type: Type of switch element (port or VSI)
  459. * @swid: switch ID of the switch the element is attached to
  460. * @pf_vf_num: PF or VF number
  461. * @is_vf: true if the element is a VF, false otherwise
  462. */
  463. static void
  464. ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
  465. u16 swid, u16 pf_vf_num, bool is_vf)
  466. {
  467. switch (type) {
  468. case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
  469. pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
  470. pi->sw_id = swid;
  471. pi->pf_vf_num = pf_vf_num;
  472. pi->is_vf = is_vf;
  473. pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
  474. pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
  475. break;
  476. default:
  477. ice_debug(pi->hw, ICE_DBG_SW,
  478. "incorrect VSI/port type received\n");
  479. break;
  480. }
  481. }
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Fetch the switch configuration from firmware (possibly across several
 * paged responses) and initialize hw->port_info from the physical-port
 * elements found. FW-internal VSI elements are skipped.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	u16 req_desc = 0;	/* 0 = start from the first descriptor */
	u16 num_elems;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);
	if (!rbuf)
		return ICE_ERR_NO_MEMORY;
	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);
		if (status)
			break;
		for (i = 0; i < num_elems; i++) {
			struct ice_aqc_get_sw_cfg_resp_elem *ele;
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 type;

			ele = rbuf[i].elements;
			/* vsi_port_num packs both the number (low bits)
			 * and the element type (high bits)
			 */
			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
			swid = le16_to_cpu(ele->swid);
			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;
			type = le16_to_cpu(ele->vsi_port_num) >>
				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
			if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}
			ice_init_port_info(hw->port_info, vsi_port_num,
					   type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
	return status;
}
  533. /**
  534. * ice_fill_sw_info - Helper function to populate lb_en and lan_en
  535. * @hw: pointer to the hardware structure
  536. * @f_info: filter info structure to fill/update
  537. *
  538. * This helper function populates the lb_en and lan_en elements of the provided
  539. * ice_fltr_info struct using the switch's type and characteristics of the
  540. * switch rule being configured.
  541. */
  542. static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
  543. {
  544. f_info->lb_en = false;
  545. f_info->lan_en = false;
  546. if ((f_info->flag & ICE_FLTR_TX) &&
  547. (f_info->fltr_act == ICE_FWD_TO_VSI ||
  548. f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
  549. f_info->fltr_act == ICE_FWD_TO_Q ||
  550. f_info->fltr_act == ICE_FWD_TO_QGRP)) {
  551. f_info->lb_en = true;
  552. if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
  553. is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
  554. f_info->lan_en = true;
  555. }
  556. }
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* Sentinel: any value above ICE_MAX_VLAN_ID means "no VLAN to
	 * program" (see the vlan_id range check near the end).
	 */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;

	/* A remove only needs the rule index; no action or dummy packet
	 * header has to be programmed.
	 */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	/* May set f_info->lb_en / f_info->lan_en based on switch mode */
	ice_fill_sw_info(hw, f_info);

	/* Encode the forwarding destination into the action word */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
		break;
	default:
		/* Unknown action: leave the rule unprogrammed */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Pick the match fields (dest MAC and/or VLAN, ethertype) that the
	 * dummy header must carry for this lookup type.
	 */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* Program the VLAN TCI only when a real VLAN ID was selected above */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource id
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile id
	 * 3. GENERIC VALUE action to hold the software marker id
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* Markers are only supported on MAC lookup rules here */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up tx rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* The lookup rule lives immediately after the large action in the
	 * shared buffer.
	 */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup tx rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action id */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule id of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* Record the marker/large action only after HW accepted it */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
  760. /**
  761. * ice_create_vsi_list_map
  762. * @hw: pointer to the hardware structure
  763. * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
  764. * @num_vsi: number of VSI handles in the array
  765. * @vsi_list_id: VSI list id generated as part of allocate resource
  766. *
  767. * Helper function to create a new entry of VSI list id to VSI mapping
  768. * using the given VSI list id
  769. */
  770. static struct ice_vsi_list_map_info *
  771. ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
  772. u16 vsi_list_id)
  773. {
  774. struct ice_switch_info *sw = hw->switch_info;
  775. struct ice_vsi_list_map_info *v_map;
  776. int i;
  777. v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
  778. if (!v_map)
  779. return NULL;
  780. v_map->vsi_list_id = vsi_list_id;
  781. v_map->ref_cnt = 1;
  782. for (i = 0; i < num_vsi; i++)
  783. set_bit(vsi_handle_arr[i], v_map->vsi_map);
  784. list_add(&v_map->list_entry, &sw->vsi_list_map_head);
  785. return v_map;
  786. }
  787. /**
  788. * ice_update_vsi_list_rule
  789. * @hw: pointer to the hardware structure
  790. * @vsi_handle_arr: array of VSI handles to form a VSI list
  791. * @num_vsi: number of VSI handles in the array
  792. * @vsi_list_id: VSI list id generated as part of allocate resource
  793. * @remove: Boolean value to indicate if this is a remove action
  794. * @opc: switch rules population command type - pass in the command opcode
  795. * @lkup_type: lookup type of the filter
  796. *
  797. * Call AQ command to add a new switch rule or update existing switch rule
  798. * using the given VSI list id
  799. */
  800. static enum ice_status
  801. ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
  802. u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
  803. enum ice_sw_lkup_type lkup_type)
  804. {
  805. struct ice_aqc_sw_rules_elem *s_rule;
  806. enum ice_status status;
  807. u16 s_rule_size;
  808. u16 type;
  809. int i;
  810. if (!num_vsi)
  811. return ICE_ERR_PARAM;
  812. if (lkup_type == ICE_SW_LKUP_MAC ||
  813. lkup_type == ICE_SW_LKUP_MAC_VLAN ||
  814. lkup_type == ICE_SW_LKUP_ETHERTYPE ||
  815. lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
  816. lkup_type == ICE_SW_LKUP_PROMISC ||
  817. lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
  818. type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
  819. ICE_AQC_SW_RULES_T_VSI_LIST_SET;
  820. else if (lkup_type == ICE_SW_LKUP_VLAN)
  821. type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
  822. ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
  823. else
  824. return ICE_ERR_PARAM;
  825. s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
  826. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  827. if (!s_rule)
  828. return ICE_ERR_NO_MEMORY;
  829. for (i = 0; i < num_vsi; i++) {
  830. if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
  831. status = ICE_ERR_PARAM;
  832. goto exit;
  833. }
  834. /* AQ call requires hw_vsi_id(s) */
  835. s_rule->pdata.vsi_list.vsi[i] =
  836. cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
  837. }
  838. s_rule->type = cpu_to_le16(type);
  839. s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
  840. s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
  841. status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
  842. exit:
  843. devm_kfree(ice_hw_to_dev(hw), s_rule);
  844. return status;
  845. }
  846. /**
  847. * ice_create_vsi_list_rule - Creates and populates a VSI list rule
  848. * @hw: pointer to the hw struct
  849. * @vsi_handle_arr: array of VSI handles to form a VSI list
  850. * @num_vsi: number of VSI handles in the array
  851. * @vsi_list_id: stores the ID of the VSI list to be created
  852. * @lkup_type: switch rule filter's lookup type
  853. */
  854. static enum ice_status
  855. ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
  856. u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
  857. {
  858. enum ice_status status;
  859. status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
  860. ice_aqc_opc_alloc_res);
  861. if (status)
  862. return status;
  863. /* Update the newly created VSI list to include the specified VSIs */
  864. return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
  865. *vsi_list_id, false,
  866. ice_aqc_opc_add_sw_rules, lkup_type);
  867. }
  868. /**
  869. * ice_create_pkt_fwd_rule
  870. * @hw: pointer to the hardware structure
  871. * @f_entry: entry containing packet forwarding information
  872. *
  873. * Create switch rule with given filter information and add an entry
  874. * to the corresponding filter management list to track this switch rule
  875. * and VSI mapping
  876. */
  877. static enum ice_status
  878. ice_create_pkt_fwd_rule(struct ice_hw *hw,
  879. struct ice_fltr_list_entry *f_entry)
  880. {
  881. struct ice_fltr_mgmt_list_entry *fm_entry;
  882. struct ice_aqc_sw_rules_elem *s_rule;
  883. enum ice_sw_lkup_type l_type;
  884. struct ice_sw_recipe *recp;
  885. enum ice_status status;
  886. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  887. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
  888. if (!s_rule)
  889. return ICE_ERR_NO_MEMORY;
  890. fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
  891. GFP_KERNEL);
  892. if (!fm_entry) {
  893. status = ICE_ERR_NO_MEMORY;
  894. goto ice_create_pkt_fwd_rule_exit;
  895. }
  896. fm_entry->fltr_info = f_entry->fltr_info;
  897. /* Initialize all the fields for the management entry */
  898. fm_entry->vsi_count = 1;
  899. fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
  900. fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
  901. fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
  902. ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
  903. ice_aqc_opc_add_sw_rules);
  904. status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
  905. ice_aqc_opc_add_sw_rules, NULL);
  906. if (status) {
  907. devm_kfree(ice_hw_to_dev(hw), fm_entry);
  908. goto ice_create_pkt_fwd_rule_exit;
  909. }
  910. f_entry->fltr_info.fltr_rule_id =
  911. le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  912. fm_entry->fltr_info.fltr_rule_id =
  913. le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  914. /* The book keeping entries will get removed when base driver
  915. * calls remove filter AQ command
  916. */
  917. l_type = fm_entry->fltr_info.lkup_type;
  918. recp = &hw->switch_info->recp_list[l_type];
  919. list_add(&fm_entry->list_entry, &recp->filt_rules);
  920. ice_create_pkt_fwd_rule_exit:
  921. devm_kfree(ice_hw_to_dev(hw), s_rule);
  922. return status;
  923. }
  924. /**
  925. * ice_update_pkt_fwd_rule
  926. * @hw: pointer to the hardware structure
  927. * @f_info: filter information for switch rule
  928. *
  929. * Call AQ command to update a previously created switch rule with a
  930. * VSI list id
  931. */
  932. static enum ice_status
  933. ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
  934. {
  935. struct ice_aqc_sw_rules_elem *s_rule;
  936. enum ice_status status;
  937. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  938. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
  939. if (!s_rule)
  940. return ICE_ERR_NO_MEMORY;
  941. ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
  942. s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
  943. /* Update switch rule with new rule set to forward VSI list */
  944. status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
  945. ice_aqc_opc_update_sw_rules, NULL);
  946. devm_kfree(ice_hw_to_dev(hw), s_rule);
  947. return status;
  948. }
  949. /**
  950. * ice_update_sw_rule_bridge_mode
  951. * @hw: pointer to the hw struct
  952. *
  953. * Updates unicast switch filter rules based on VEB/VEPA mode
  954. */
  955. enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
  956. {
  957. struct ice_switch_info *sw = hw->switch_info;
  958. struct ice_fltr_mgmt_list_entry *fm_entry;
  959. enum ice_status status = 0;
  960. struct list_head *rule_head;
  961. struct mutex *rule_lock; /* Lock to protect filter rule list */
  962. rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
  963. rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
  964. mutex_lock(rule_lock);
  965. list_for_each_entry(fm_entry, rule_head, list_entry) {
  966. struct ice_fltr_info *fi = &fm_entry->fltr_info;
  967. u8 *addr = fi->l_data.mac.mac_addr;
  968. /* Update unicast Tx rules to reflect the selected
  969. * VEB/VEPA mode
  970. */
  971. if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
  972. (fi->fltr_act == ICE_FWD_TO_VSI ||
  973. fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
  974. fi->fltr_act == ICE_FWD_TO_Q ||
  975. fi->fltr_act == ICE_FWD_TO_QGRP)) {
  976. status = ice_update_pkt_fwd_rule(hw, fi);
  977. if (status)
  978. break;
  979. }
  980. }
  981. mutex_unlock(rule_lock);
  982. return status;
  983. }
  984. /**
  985. * ice_add_update_vsi_list
  986. * @hw: pointer to the hardware structure
  987. * @m_entry: pointer to current filter management list entry
  988. * @cur_fltr: filter information from the book keeping entry
  989. * @new_fltr: filter information with the new VSI to be added
  990. *
  991. * Call AQ command to add or update previously created VSI list with new VSI.
  992. *
  993. * Helper function to do book keeping associated with adding filter information
  994. * The algorithm to do the booking keeping is described below :
  995. * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
  996. * if only one VSI has been added till now
  997. * Allocate a new VSI list and add two VSIs
  998. * to this list using switch rule command
  999. * Update the previously created switch rule with the
  1000. * newly created VSI list id
  1001. * if a VSI list was previously created
  1002. * Add the new VSI to the previously created VSI list set
  1003. * using the update switch rule command
  1004. */
  1005. static enum ice_status
  1006. ice_add_update_vsi_list(struct ice_hw *hw,
  1007. struct ice_fltr_mgmt_list_entry *m_entry,
  1008. struct ice_fltr_info *cur_fltr,
  1009. struct ice_fltr_info *new_fltr)
  1010. {
  1011. enum ice_status status = 0;
  1012. u16 vsi_list_id = 0;
  1013. if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
  1014. cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
  1015. return ICE_ERR_NOT_IMPL;
  1016. if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
  1017. new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
  1018. (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
  1019. cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
  1020. return ICE_ERR_NOT_IMPL;
  1021. if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
  1022. /* Only one entry existed in the mapping and it was not already
  1023. * a part of a VSI list. So, create a VSI list with the old and
  1024. * new VSIs.
  1025. */
  1026. struct ice_fltr_info tmp_fltr;
  1027. u16 vsi_handle_arr[2];
  1028. /* A rule already exists with the new VSI being added */
  1029. if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
  1030. return ICE_ERR_ALREADY_EXISTS;
  1031. vsi_handle_arr[0] = cur_fltr->vsi_handle;
  1032. vsi_handle_arr[1] = new_fltr->vsi_handle;
  1033. status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
  1034. &vsi_list_id,
  1035. new_fltr->lkup_type);
  1036. if (status)
  1037. return status;
  1038. tmp_fltr = *new_fltr;
  1039. tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
  1040. tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
  1041. tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
  1042. /* Update the previous switch rule of "MAC forward to VSI" to
  1043. * "MAC fwd to VSI list"
  1044. */
  1045. status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
  1046. if (status)
  1047. return status;
  1048. cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
  1049. cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
  1050. m_entry->vsi_list_info =
  1051. ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
  1052. vsi_list_id);
  1053. /* If this entry was large action then the large action needs
  1054. * to be updated to point to FWD to VSI list
  1055. */
  1056. if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
  1057. status =
  1058. ice_add_marker_act(hw, m_entry,
  1059. m_entry->sw_marker_id,
  1060. m_entry->lg_act_idx);
  1061. } else {
  1062. u16 vsi_handle = new_fltr->vsi_handle;
  1063. enum ice_adminq_opc opcode;
  1064. /* A rule already exists with the new VSI being added */
  1065. if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
  1066. return 0;
  1067. /* Update the previously created VSI list set with
  1068. * the new VSI id passed in
  1069. */
  1070. vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
  1071. opcode = ice_aqc_opc_update_sw_rules;
  1072. status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
  1073. vsi_list_id, false, opcode,
  1074. new_fltr->lkup_type);
  1075. /* update VSI list mapping info with new VSI id */
  1076. if (!status)
  1077. set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
  1078. }
  1079. if (!status)
  1080. m_entry->vsi_count++;
  1081. return status;
  1082. }
  1083. /**
  1084. * ice_find_rule_entry - Search a rule entry
  1085. * @hw: pointer to the hardware structure
  1086. * @recp_id: lookup type for which the specified rule needs to be searched
  1087. * @f_info: rule information
  1088. *
  1089. * Helper function to search for a given rule entry
  1090. * Returns pointer to entry storing the rule if found
  1091. */
  1092. static struct ice_fltr_mgmt_list_entry *
  1093. ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
  1094. {
  1095. struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
  1096. struct ice_switch_info *sw = hw->switch_info;
  1097. struct list_head *list_head;
  1098. list_head = &sw->recp_list[recp_id].filt_rules;
  1099. list_for_each_entry(list_itr, list_head, list_entry) {
  1100. if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
  1101. sizeof(f_info->l_data)) &&
  1102. f_info->flag == list_itr->fltr_info.flag) {
  1103. ret = list_itr;
  1104. break;
  1105. }
  1106. }
  1107. return ret;
  1108. }
  1109. /**
  1110. * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
  1111. * @hw: pointer to the hardware structure
  1112. * @recp_id: lookup type for which VSI lists needs to be searched
  1113. * @vsi_handle: VSI handle to be found in VSI list
  1114. * @vsi_list_id: VSI list id found containing vsi_handle
  1115. *
  1116. * Helper function to search a VSI list with single entry containing given VSI
  1117. * handle element. This can be extended further to search VSI list with more
  1118. * than 1 vsi_count. Returns pointer to VSI list entry if found.
  1119. */
  1120. static struct ice_vsi_list_map_info *
  1121. ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
  1122. u16 *vsi_list_id)
  1123. {
  1124. struct ice_vsi_list_map_info *map_info = NULL;
  1125. struct ice_switch_info *sw = hw->switch_info;
  1126. struct ice_fltr_mgmt_list_entry *list_itr;
  1127. struct list_head *list_head;
  1128. list_head = &sw->recp_list[recp_id].filt_rules;
  1129. list_for_each_entry(list_itr, list_head, list_entry) {
  1130. if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
  1131. map_info = list_itr->vsi_list_info;
  1132. if (test_bit(vsi_handle, map_info->vsi_map)) {
  1133. *vsi_list_id = map_info->vsi_list_id;
  1134. return map_info;
  1135. }
  1136. }
  1137. }
  1138. return NULL;
  1139. }
  1140. /**
  1141. * ice_add_rule_internal - add rule for a given lookup type
  1142. * @hw: pointer to the hardware structure
  1143. * @recp_id: lookup type (recipe id) for which rule has to be added
  1144. * @f_entry: structure containing MAC forwarding information
  1145. *
  1146. * Adds or updates the rule lists for a given recipe
  1147. */
  1148. static enum ice_status
  1149. ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
  1150. struct ice_fltr_list_entry *f_entry)
  1151. {
  1152. struct ice_switch_info *sw = hw->switch_info;
  1153. struct ice_fltr_info *new_fltr, *cur_fltr;
  1154. struct ice_fltr_mgmt_list_entry *m_entry;
  1155. struct mutex *rule_lock; /* Lock to protect filter rule list */
  1156. enum ice_status status = 0;
  1157. if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
  1158. return ICE_ERR_PARAM;
  1159. f_entry->fltr_info.fwd_id.hw_vsi_id =
  1160. ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
  1161. rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
  1162. mutex_lock(rule_lock);
  1163. new_fltr = &f_entry->fltr_info;
  1164. if (new_fltr->flag & ICE_FLTR_RX)
  1165. new_fltr->src = hw->port_info->lport;
  1166. else if (new_fltr->flag & ICE_FLTR_TX)
  1167. new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
  1168. m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
  1169. if (!m_entry) {
  1170. mutex_unlock(rule_lock);
  1171. return ice_create_pkt_fwd_rule(hw, f_entry);
  1172. }
  1173. cur_fltr = &m_entry->fltr_info;
  1174. status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
  1175. mutex_unlock(rule_lock);
  1176. return status;
  1177. }
  1178. /**
  1179. * ice_remove_vsi_list_rule
  1180. * @hw: pointer to the hardware structure
  1181. * @vsi_list_id: VSI list id generated as part of allocate resource
  1182. * @lkup_type: switch rule filter lookup type
  1183. *
  1184. * The VSI list should be emptied before this function is called to remove the
  1185. * VSI list.
  1186. */
  1187. static enum ice_status
  1188. ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
  1189. enum ice_sw_lkup_type lkup_type)
  1190. {
  1191. struct ice_aqc_sw_rules_elem *s_rule;
  1192. enum ice_status status;
  1193. u16 s_rule_size;
  1194. s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
  1195. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  1196. if (!s_rule)
  1197. return ICE_ERR_NO_MEMORY;
  1198. s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
  1199. s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
  1200. /* Free the vsi_list resource that we allocated. It is assumed that the
  1201. * list is empty at this point.
  1202. */
  1203. status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
  1204. ice_aqc_opc_free_res);
  1205. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1206. return status;
  1207. }
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 *
 * Removes @vsi_handle from the VSI list backing @fm_list. When only one VSI
 * remains (non-VLAN lookups) the list is torn down and the entry is converted
 * back to a plain ICE_FWD_TO_VSI rule.
 *
 * NOTE(review): fm_list->vsi_list_info is dereferenced without a NULL check;
 * the caller (ice_remove_rule_internal) guarantees it is set when fltr_act is
 * ICE_FWD_TO_VSI_LIST -- confirm before adding new callers.
 * NOTE(review): in the VLAN case with vsi_count == 0 the bitmap is already
 * empty after clear_bit(), so find_first_bit() yields ICE_MAX_VSI and the
 * function returns ICE_ERR_OUT_OF_RANGE before the list is freed -- verify
 * this path is intended.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	/* Only list-backed entries with at least one VSI can be updated */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;

	/* Ask firmware to drop this VSI from the list, then mirror the
	 * change in the book-keeping bitmap and count.
	 */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* Locate the last remaining VSI in the list */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status)
			return status;

		/* Change the list entry action from VSI_LIST to VSI */
		fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
		fm_list->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->fltr_info.vsi_handle = rem_vsi_handle;

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
  1267. /**
  1268. * ice_remove_rule_internal - Remove a filter rule of a given type
  1269. * @hw: pointer to the hardware structure
  1270. * @recp_id: recipe id for which the rule needs to removed
  1271. * @f_entry: rule entry containing filter information
  1272. */
  1273. static enum ice_status
  1274. ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
  1275. struct ice_fltr_list_entry *f_entry)
  1276. {
  1277. struct ice_switch_info *sw = hw->switch_info;
  1278. struct ice_fltr_mgmt_list_entry *list_elem;
  1279. struct mutex *rule_lock; /* Lock to protect filter rule list */
  1280. enum ice_status status = 0;
  1281. bool remove_rule = false;
  1282. u16 vsi_handle;
  1283. if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
  1284. return ICE_ERR_PARAM;
  1285. f_entry->fltr_info.fwd_id.hw_vsi_id =
  1286. ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
  1287. rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
  1288. mutex_lock(rule_lock);
  1289. list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
  1290. if (!list_elem) {
  1291. status = ICE_ERR_DOES_NOT_EXIST;
  1292. goto exit;
  1293. }
  1294. if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
  1295. remove_rule = true;
  1296. } else if (!list_elem->vsi_list_info) {
  1297. status = ICE_ERR_DOES_NOT_EXIST;
  1298. goto exit;
  1299. } else {
  1300. if (list_elem->vsi_list_info->ref_cnt > 1)
  1301. list_elem->vsi_list_info->ref_cnt--;
  1302. vsi_handle = f_entry->fltr_info.vsi_handle;
  1303. status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
  1304. if (status)
  1305. goto exit;
  1306. /* if vsi count goes to zero after updating the vsi list */
  1307. if (list_elem->vsi_count == 0)
  1308. remove_rule = true;
  1309. }
  1310. if (remove_rule) {
  1311. /* Remove the lookup rule */
  1312. struct ice_aqc_sw_rules_elem *s_rule;
  1313. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  1314. ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
  1315. GFP_KERNEL);
  1316. if (!s_rule) {
  1317. status = ICE_ERR_NO_MEMORY;
  1318. goto exit;
  1319. }
  1320. ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
  1321. ice_aqc_opc_remove_sw_rules);
  1322. status = ice_aq_sw_rules(hw, s_rule,
  1323. ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
  1324. ice_aqc_opc_remove_sw_rules, NULL);
  1325. if (status)
  1326. goto exit;
  1327. /* Remove a book keeping from the list */
  1328. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1329. list_del(&list_elem->list_entry);
  1330. devm_kfree(ice_hw_to_dev(hw), list_elem);
  1331. }
  1332. exit:
  1333. mutex_unlock(rule_lock);
  1334. return status;
  1335. }
  1336. /**
  1337. * ice_add_mac - Add a MAC address based filter rule
  1338. * @hw: pointer to the hardware structure
  1339. * @m_list: list of MAC addresses and forwarding information
  1340. *
  1341. * IMPORTANT: When the ucast_shared flag is set to false and m_list has
  1342. * multiple unicast addresses, the function assumes that all the
  1343. * addresses are unique in a given add_mac call. It doesn't
  1344. * check for duplicates in this case, removing duplicates from a given
  1345. * list should be taken care of in the caller of this function.
  1346. */
  1347. enum ice_status
  1348. ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
  1349. {
  1350. struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
  1351. struct ice_fltr_list_entry *m_list_itr;
  1352. struct list_head *rule_head;
  1353. u16 elem_sent, total_elem_left;
  1354. struct ice_switch_info *sw;
  1355. struct mutex *rule_lock; /* Lock to protect filter rule list */
  1356. enum ice_status status = 0;
  1357. u16 num_unicast = 0;
  1358. u16 s_rule_size;
  1359. if (!m_list || !hw)
  1360. return ICE_ERR_PARAM;
  1361. s_rule = NULL;
  1362. sw = hw->switch_info;
  1363. rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
  1364. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1365. u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
  1366. u16 vsi_handle;
  1367. u16 hw_vsi_id;
  1368. m_list_itr->fltr_info.flag = ICE_FLTR_TX;
  1369. vsi_handle = m_list_itr->fltr_info.vsi_handle;
  1370. if (!ice_is_vsi_valid(hw, vsi_handle))
  1371. return ICE_ERR_PARAM;
  1372. hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
  1373. m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
  1374. /* update the src in case it is vsi num */
  1375. if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
  1376. return ICE_ERR_PARAM;
  1377. m_list_itr->fltr_info.src = hw_vsi_id;
  1378. if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
  1379. is_zero_ether_addr(add))
  1380. return ICE_ERR_PARAM;
  1381. if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
  1382. /* Don't overwrite the unicast address */
  1383. mutex_lock(rule_lock);
  1384. if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
  1385. &m_list_itr->fltr_info)) {
  1386. mutex_unlock(rule_lock);
  1387. return ICE_ERR_ALREADY_EXISTS;
  1388. }
  1389. mutex_unlock(rule_lock);
  1390. num_unicast++;
  1391. } else if (is_multicast_ether_addr(add) ||
  1392. (is_unicast_ether_addr(add) && hw->ucast_shared)) {
  1393. m_list_itr->status =
  1394. ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
  1395. m_list_itr);
  1396. if (m_list_itr->status)
  1397. return m_list_itr->status;
  1398. }
  1399. }
  1400. mutex_lock(rule_lock);
  1401. /* Exit if no suitable entries were found for adding bulk switch rule */
  1402. if (!num_unicast) {
  1403. status = 0;
  1404. goto ice_add_mac_exit;
  1405. }
  1406. rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
  1407. /* Allocate switch rule buffer for the bulk update for unicast */
  1408. s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
  1409. s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
  1410. GFP_KERNEL);
  1411. if (!s_rule) {
  1412. status = ICE_ERR_NO_MEMORY;
  1413. goto ice_add_mac_exit;
  1414. }
  1415. r_iter = s_rule;
  1416. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1417. struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
  1418. u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
  1419. if (is_unicast_ether_addr(mac_addr)) {
  1420. ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
  1421. ice_aqc_opc_add_sw_rules);
  1422. r_iter = (struct ice_aqc_sw_rules_elem *)
  1423. ((u8 *)r_iter + s_rule_size);
  1424. }
  1425. }
  1426. /* Call AQ bulk switch rule update for all unicast addresses */
  1427. r_iter = s_rule;
  1428. /* Call AQ switch rule in AQ_MAX chunk */
  1429. for (total_elem_left = num_unicast; total_elem_left > 0;
  1430. total_elem_left -= elem_sent) {
  1431. struct ice_aqc_sw_rules_elem *entry = r_iter;
  1432. elem_sent = min(total_elem_left,
  1433. (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
  1434. status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
  1435. elem_sent, ice_aqc_opc_add_sw_rules,
  1436. NULL);
  1437. if (status)
  1438. goto ice_add_mac_exit;
  1439. r_iter = (struct ice_aqc_sw_rules_elem *)
  1440. ((u8 *)r_iter + (elem_sent * s_rule_size));
  1441. }
  1442. /* Fill up rule id based on the value returned from FW */
  1443. r_iter = s_rule;
  1444. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1445. struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
  1446. u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
  1447. struct ice_fltr_mgmt_list_entry *fm_entry;
  1448. if (is_unicast_ether_addr(mac_addr)) {
  1449. f_info->fltr_rule_id =
  1450. le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
  1451. f_info->fltr_act = ICE_FWD_TO_VSI;
  1452. /* Create an entry to track this MAC address */
  1453. fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
  1454. sizeof(*fm_entry), GFP_KERNEL);
  1455. if (!fm_entry) {
  1456. status = ICE_ERR_NO_MEMORY;
  1457. goto ice_add_mac_exit;
  1458. }
  1459. fm_entry->fltr_info = *f_info;
  1460. fm_entry->vsi_count = 1;
  1461. /* The book keeping entries will get removed when
  1462. * base driver calls remove filter AQ command
  1463. */
  1464. list_add(&fm_entry->list_entry, rule_head);
  1465. r_iter = (struct ice_aqc_sw_rules_elem *)
  1466. ((u8 *)r_iter + s_rule_size);
  1467. }
  1468. }
  1469. ice_add_mac_exit:
  1470. mutex_unlock(rule_lock);
  1471. if (s_rule)
  1472. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1473. return status;
  1474. }
  1475. /**
  1476. * ice_add_vlan_internal - Add one VLAN based filter rule
  1477. * @hw: pointer to the hardware structure
  1478. * @f_entry: filter entry containing one VLAN information
  1479. */
  1480. static enum ice_status
  1481. ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
  1482. {
  1483. struct ice_switch_info *sw = hw->switch_info;
  1484. struct ice_fltr_mgmt_list_entry *v_list_itr;
  1485. struct ice_fltr_info *new_fltr, *cur_fltr;
  1486. enum ice_sw_lkup_type lkup_type;
  1487. u16 vsi_list_id = 0, vsi_handle;
  1488. struct mutex *rule_lock; /* Lock to protect filter rule list */
  1489. enum ice_status status = 0;
  1490. if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
  1491. return ICE_ERR_PARAM;
  1492. f_entry->fltr_info.fwd_id.hw_vsi_id =
  1493. ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
  1494. new_fltr = &f_entry->fltr_info;
  1495. /* VLAN id should only be 12 bits */
  1496. if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
  1497. return ICE_ERR_PARAM;
  1498. if (new_fltr->src_id != ICE_SRC_ID_VSI)
  1499. return ICE_ERR_PARAM;
  1500. new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
  1501. lkup_type = new_fltr->lkup_type;
  1502. vsi_handle = new_fltr->vsi_handle;
  1503. rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
  1504. mutex_lock(rule_lock);
  1505. v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
  1506. if (!v_list_itr) {
  1507. struct ice_vsi_list_map_info *map_info = NULL;
  1508. if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
  1509. /* All VLAN pruning rules use a VSI list. Check if
  1510. * there is already a VSI list containing VSI that we
  1511. * want to add. If found, use the same vsi_list_id for
  1512. * this new VLAN rule or else create a new list.
  1513. */
  1514. map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
  1515. vsi_handle,
  1516. &vsi_list_id);
  1517. if (!map_info) {
  1518. status = ice_create_vsi_list_rule(hw,
  1519. &vsi_handle,
  1520. 1,
  1521. &vsi_list_id,
  1522. lkup_type);
  1523. if (status)
  1524. goto exit;
  1525. }
  1526. /* Convert the action to forwarding to a VSI list. */
  1527. new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
  1528. new_fltr->fwd_id.vsi_list_id = vsi_list_id;
  1529. }
  1530. status = ice_create_pkt_fwd_rule(hw, f_entry);
  1531. if (!status) {
  1532. v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
  1533. new_fltr);
  1534. if (!v_list_itr) {
  1535. status = ICE_ERR_DOES_NOT_EXIST;
  1536. goto exit;
  1537. }
  1538. /* reuse VSI list for new rule and increment ref_cnt */
  1539. if (map_info) {
  1540. v_list_itr->vsi_list_info = map_info;
  1541. map_info->ref_cnt++;
  1542. } else {
  1543. v_list_itr->vsi_list_info =
  1544. ice_create_vsi_list_map(hw, &vsi_handle,
  1545. 1, vsi_list_id);
  1546. }
  1547. }
  1548. } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
  1549. /* Update existing VSI list to add new VSI id only if it used
  1550. * by one VLAN rule.
  1551. */
  1552. cur_fltr = &v_list_itr->fltr_info;
  1553. status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
  1554. new_fltr);
  1555. } else {
  1556. /* If VLAN rule exists and VSI list being used by this rule is
  1557. * referenced by more than 1 VLAN rule. Then create a new VSI
  1558. * list appending previous VSI with new VSI and update existing
  1559. * VLAN rule to point to new VSI list id
  1560. */
  1561. struct ice_fltr_info tmp_fltr;
  1562. u16 vsi_handle_arr[2];
  1563. u16 cur_handle;
  1564. /* Current implementation only supports reusing VSI list with
  1565. * one VSI count. We should never hit below condition
  1566. */
  1567. if (v_list_itr->vsi_count > 1 &&
  1568. v_list_itr->vsi_list_info->ref_cnt > 1) {
  1569. ice_debug(hw, ICE_DBG_SW,
  1570. "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
  1571. status = ICE_ERR_CFG;
  1572. goto exit;
  1573. }
  1574. cur_handle =
  1575. find_first_bit(v_list_itr->vsi_list_info->vsi_map,
  1576. ICE_MAX_VSI);
  1577. /* A rule already exists with the new VSI being added */
  1578. if (cur_handle == vsi_handle) {
  1579. status = ICE_ERR_ALREADY_EXISTS;
  1580. goto exit;
  1581. }
  1582. vsi_handle_arr[0] = cur_handle;
  1583. vsi_handle_arr[1] = vsi_handle;
  1584. status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
  1585. &vsi_list_id, lkup_type);
  1586. if (status)
  1587. goto exit;
  1588. tmp_fltr = v_list_itr->fltr_info;
  1589. tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
  1590. tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
  1591. tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
  1592. /* Update the previous switch rule to a new VSI list which
  1593. * includes current VSI thats requested
  1594. */
  1595. status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
  1596. if (status)
  1597. goto exit;
  1598. /* before overriding VSI list map info. decrement ref_cnt of
  1599. * previous VSI list
  1600. */
  1601. v_list_itr->vsi_list_info->ref_cnt--;
  1602. /* now update to newly created list */
  1603. v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
  1604. v_list_itr->vsi_list_info =
  1605. ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
  1606. vsi_list_id);
  1607. v_list_itr->vsi_count++;
  1608. }
  1609. exit:
  1610. mutex_unlock(rule_lock);
  1611. return status;
  1612. }
  1613. /**
  1614. * ice_add_vlan - Add VLAN based filter rule
  1615. * @hw: pointer to the hardware structure
  1616. * @v_list: list of VLAN entries and forwarding information
  1617. */
  1618. enum ice_status
  1619. ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
  1620. {
  1621. struct ice_fltr_list_entry *v_list_itr;
  1622. if (!v_list || !hw)
  1623. return ICE_ERR_PARAM;
  1624. list_for_each_entry(v_list_itr, v_list, list_entry) {
  1625. if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
  1626. return ICE_ERR_PARAM;
  1627. v_list_itr->fltr_info.flag = ICE_FLTR_TX;
  1628. v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
  1629. if (v_list_itr->status)
  1630. return v_list_itr->status;
  1631. }
  1632. return 0;
  1633. }
  1634. /**
  1635. * ice_rem_sw_rule_info
  1636. * @hw: pointer to the hardware structure
  1637. * @rule_head: pointer to the switch list structure that we want to delete
  1638. */
  1639. static void
  1640. ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
  1641. {
  1642. if (!list_empty(rule_head)) {
  1643. struct ice_fltr_mgmt_list_entry *entry;
  1644. struct ice_fltr_mgmt_list_entry *tmp;
  1645. list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
  1646. list_del(&entry->list_entry);
  1647. devm_kfree(ice_hw_to_dev(hw), entry);
  1648. }
  1649. }
  1650. }
  1651. /**
  1652. * ice_cfg_dflt_vsi - change state of VSI to set/clear default
  1653. * @hw: pointer to the hardware structure
  1654. * @vsi_handle: VSI handle to set as default
  1655. * @set: true to add the above mentioned switch rule, false to remove it
  1656. * @direction: ICE_FLTR_RX or ICE_FLTR_TX
  1657. *
  1658. * add filter rule to set/unset given VSI as default VSI for the switch
  1659. * (represented by swid)
  1660. */
  1661. enum ice_status
  1662. ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
  1663. {
  1664. struct ice_aqc_sw_rules_elem *s_rule;
  1665. struct ice_fltr_info f_info;
  1666. enum ice_adminq_opc opcode;
  1667. enum ice_status status;
  1668. u16 s_rule_size;
  1669. u16 hw_vsi_id;
  1670. if (!ice_is_vsi_valid(hw, vsi_handle))
  1671. return ICE_ERR_PARAM;
  1672. hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
  1673. s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
  1674. ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
  1675. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  1676. if (!s_rule)
  1677. return ICE_ERR_NO_MEMORY;
  1678. memset(&f_info, 0, sizeof(f_info));
  1679. f_info.lkup_type = ICE_SW_LKUP_DFLT;
  1680. f_info.flag = direction;
  1681. f_info.fltr_act = ICE_FWD_TO_VSI;
  1682. f_info.fwd_id.hw_vsi_id = hw_vsi_id;
  1683. if (f_info.flag & ICE_FLTR_RX) {
  1684. f_info.src = hw->port_info->lport;
  1685. f_info.src_id = ICE_SRC_ID_LPORT;
  1686. if (!set)
  1687. f_info.fltr_rule_id =
  1688. hw->port_info->dflt_rx_vsi_rule_id;
  1689. } else if (f_info.flag & ICE_FLTR_TX) {
  1690. f_info.src_id = ICE_SRC_ID_VSI;
  1691. f_info.src = hw_vsi_id;
  1692. if (!set)
  1693. f_info.fltr_rule_id =
  1694. hw->port_info->dflt_tx_vsi_rule_id;
  1695. }
  1696. if (set)
  1697. opcode = ice_aqc_opc_add_sw_rules;
  1698. else
  1699. opcode = ice_aqc_opc_remove_sw_rules;
  1700. ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
  1701. status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
  1702. if (status || !(f_info.flag & ICE_FLTR_TX_RX))
  1703. goto out;
  1704. if (set) {
  1705. u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  1706. if (f_info.flag & ICE_FLTR_TX) {
  1707. hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
  1708. hw->port_info->dflt_tx_vsi_rule_id = index;
  1709. } else if (f_info.flag & ICE_FLTR_RX) {
  1710. hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
  1711. hw->port_info->dflt_rx_vsi_rule_id = index;
  1712. }
  1713. } else {
  1714. if (f_info.flag & ICE_FLTR_TX) {
  1715. hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
  1716. hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
  1717. } else if (f_info.flag & ICE_FLTR_RX) {
  1718. hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
  1719. hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
  1720. }
  1721. }
  1722. out:
  1723. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1724. return status;
  1725. }
  1726. /**
  1727. * ice_remove_mac - remove a MAC address based filter rule
  1728. * @hw: pointer to the hardware structure
  1729. * @m_list: list of MAC addresses and forwarding information
  1730. *
  1731. * This function removes either a MAC filter rule or a specific VSI from a
  1732. * VSI list for a multicast MAC address.
  1733. *
  1734. * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
  1735. * ice_add_mac. Caller should be aware that this call will only work if all
  1736. * the entries passed into m_list were added previously. It will not attempt to
  1737. * do a partial remove of entries that were found.
  1738. */
  1739. enum ice_status
  1740. ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
  1741. {
  1742. struct ice_fltr_list_entry *list_itr;
  1743. if (!m_list)
  1744. return ICE_ERR_PARAM;
  1745. list_for_each_entry(list_itr, m_list, list_entry) {
  1746. enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
  1747. if (l_type != ICE_SW_LKUP_MAC)
  1748. return ICE_ERR_PARAM;
  1749. list_itr->status = ice_remove_rule_internal(hw,
  1750. ICE_SW_LKUP_MAC,
  1751. list_itr);
  1752. if (list_itr->status)
  1753. return list_itr->status;
  1754. }
  1755. return 0;
  1756. }
  1757. /**
  1758. * ice_remove_vlan - Remove VLAN based filter rule
  1759. * @hw: pointer to the hardware structure
  1760. * @v_list: list of VLAN entries and forwarding information
  1761. */
  1762. enum ice_status
  1763. ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
  1764. {
  1765. struct ice_fltr_list_entry *v_list_itr;
  1766. if (!v_list || !hw)
  1767. return ICE_ERR_PARAM;
  1768. list_for_each_entry(v_list_itr, v_list, list_entry) {
  1769. enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
  1770. if (l_type != ICE_SW_LKUP_VLAN)
  1771. return ICE_ERR_PARAM;
  1772. v_list_itr->status = ice_remove_rule_internal(hw,
  1773. ICE_SW_LKUP_VLAN,
  1774. v_list_itr);
  1775. if (v_list_itr->status)
  1776. return v_list_itr->status;
  1777. }
  1778. return 0;
  1779. }
  1780. /**
  1781. * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
  1782. * @fm_entry: filter entry to inspect
  1783. * @vsi_handle: VSI handle to compare with filter info
  1784. */
  1785. static bool
  1786. ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
  1787. {
  1788. return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
  1789. fm_entry->fltr_info.vsi_handle == vsi_handle) ||
  1790. (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
  1791. (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
  1792. }
  1793. /**
  1794. * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
  1795. * @hw: pointer to the hardware structure
  1796. * @vsi_handle: VSI handle to remove filters from
  1797. * @vsi_list_head: pointer to the list to add entry to
  1798. * @fi: pointer to fltr_info of filter entry to copy & add
  1799. *
  1800. * Helper function, used when creating a list of filters to remove from
  1801. * a specific VSI. The entry added to vsi_list_head is a COPY of the
  1802. * original filter entry, with the exception of fltr_info.fltr_act and
  1803. * fltr_info.fwd_id fields. These are set such that later logic can
  1804. * extract which VSI to remove the fltr from, and pass on that information.
  1805. */
  1806. static enum ice_status
  1807. ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
  1808. struct list_head *vsi_list_head,
  1809. struct ice_fltr_info *fi)
  1810. {
  1811. struct ice_fltr_list_entry *tmp;
  1812. /* this memory is freed up in the caller function
  1813. * once filters for this VSI are removed
  1814. */
  1815. tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
  1816. if (!tmp)
  1817. return ICE_ERR_NO_MEMORY;
  1818. tmp->fltr_info = *fi;
  1819. /* Overwrite these fields to indicate which VSI to remove filter from,
  1820. * so find and remove logic can extract the information from the
  1821. * list entries. Note that original entries will still have proper
  1822. * values.
  1823. */
  1824. tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
  1825. tmp->fltr_info.vsi_handle = vsi_handle;
  1826. tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
  1827. list_add(&tmp->list_entry, vsi_list_head);
  1828. return 0;
  1829. }
  1830. /**
  1831. * ice_add_to_vsi_fltr_list - Add VSI filters to the list
  1832. * @hw: pointer to the hardware structure
  1833. * @vsi_handle: VSI handle to remove filters from
  1834. * @lkup_list_head: pointer to the list that has certain lookup type filters
  1835. * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
  1836. *
  1837. * Locates all filters in lkup_list_head that are used by the given VSI,
  1838. * and adds COPIES of those entries to vsi_list_head (intended to be used
  1839. * to remove the listed filters).
  1840. * Note that this means all entries in vsi_list_head must be explicitly
  1841. * deallocated by the caller when done with list.
  1842. */
  1843. static enum ice_status
  1844. ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
  1845. struct list_head *lkup_list_head,
  1846. struct list_head *vsi_list_head)
  1847. {
  1848. struct ice_fltr_mgmt_list_entry *fm_entry;
  1849. enum ice_status status = 0;
  1850. /* check to make sure VSI id is valid and within boundary */
  1851. if (!ice_is_vsi_valid(hw, vsi_handle))
  1852. return ICE_ERR_PARAM;
  1853. list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
  1854. struct ice_fltr_info *fi;
  1855. fi = &fm_entry->fltr_info;
  1856. if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
  1857. continue;
  1858. status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
  1859. vsi_list_head, fi);
  1860. if (status)
  1861. return status;
  1862. }
  1863. return status;
  1864. }
  1865. /**
  1866. * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
  1867. * @hw: pointer to the hardware structure
  1868. * @vsi_handle: VSI handle to remove filters from
  1869. * @lkup: switch rule filter lookup type
  1870. */
  1871. static void
  1872. ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
  1873. enum ice_sw_lkup_type lkup)
  1874. {
  1875. struct ice_switch_info *sw = hw->switch_info;
  1876. struct ice_fltr_list_entry *fm_entry;
  1877. struct list_head remove_list_head;
  1878. struct list_head *rule_head;
  1879. struct ice_fltr_list_entry *tmp;
  1880. struct mutex *rule_lock; /* Lock to protect filter rule list */
  1881. enum ice_status status;
  1882. INIT_LIST_HEAD(&remove_list_head);
  1883. rule_lock = &sw->recp_list[lkup].filt_rule_lock;
  1884. rule_head = &sw->recp_list[lkup].filt_rules;
  1885. mutex_lock(rule_lock);
  1886. status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
  1887. &remove_list_head);
  1888. mutex_unlock(rule_lock);
  1889. if (status)
  1890. return;
  1891. switch (lkup) {
  1892. case ICE_SW_LKUP_MAC:
  1893. ice_remove_mac(hw, &remove_list_head);
  1894. break;
  1895. case ICE_SW_LKUP_VLAN:
  1896. ice_remove_vlan(hw, &remove_list_head);
  1897. break;
  1898. case ICE_SW_LKUP_MAC_VLAN:
  1899. case ICE_SW_LKUP_ETHERTYPE:
  1900. case ICE_SW_LKUP_ETHERTYPE_MAC:
  1901. case ICE_SW_LKUP_PROMISC:
  1902. case ICE_SW_LKUP_DFLT:
  1903. case ICE_SW_LKUP_PROMISC_VLAN:
  1904. case ICE_SW_LKUP_LAST:
  1905. default:
  1906. ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
  1907. break;
  1908. }
  1909. list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
  1910. list_del(&fm_entry->list_entry);
  1911. devm_kfree(ice_hw_to_dev(hw), fm_entry);
  1912. }
  1913. }
  1914. /**
  1915. * ice_remove_vsi_fltr - Remove all filters for a VSI
  1916. * @hw: pointer to the hardware structure
  1917. * @vsi_handle: VSI handle to remove filters from
  1918. */
  1919. void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
  1920. {
  1921. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
  1922. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
  1923. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
  1924. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
  1925. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
  1926. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
  1927. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
  1928. ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
  1929. }
  1930. /**
  1931. * ice_replay_vsi_fltr - Replay filters for requested VSI
  1932. * @hw: pointer to the hardware structure
  1933. * @vsi_handle: driver VSI handle
  1934. * @recp_id: Recipe id for which rules need to be replayed
  1935. * @list_head: list for which filters need to be replayed
  1936. *
  1937. * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
  1938. * It is required to pass valid VSI handle.
  1939. */
  1940. static enum ice_status
  1941. ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
  1942. struct list_head *list_head)
  1943. {
  1944. struct ice_fltr_mgmt_list_entry *itr;
  1945. enum ice_status status = 0;
  1946. u16 hw_vsi_id;
  1947. if (list_empty(list_head))
  1948. return status;
  1949. hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
  1950. list_for_each_entry(itr, list_head, list_entry) {
  1951. struct ice_fltr_list_entry f_entry;
  1952. f_entry.fltr_info = itr->fltr_info;
  1953. if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
  1954. itr->fltr_info.vsi_handle == vsi_handle) {
  1955. /* update the src in case it is vsi num */
  1956. if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
  1957. f_entry.fltr_info.src = hw_vsi_id;
  1958. status = ice_add_rule_internal(hw, recp_id, &f_entry);
  1959. if (status)
  1960. goto end;
  1961. continue;
  1962. }
  1963. if (!test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
  1964. continue;
  1965. /* Clearing it so that the logic can add it back */
  1966. clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
  1967. f_entry.fltr_info.vsi_handle = vsi_handle;
  1968. f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
  1969. /* update the src in case it is vsi num */
  1970. if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
  1971. f_entry.fltr_info.src = hw_vsi_id;
  1972. if (recp_id == ICE_SW_LKUP_VLAN)
  1973. status = ice_add_vlan_internal(hw, &f_entry);
  1974. else
  1975. status = ice_add_rule_internal(hw, recp_id, &f_entry);
  1976. if (status)
  1977. goto end;
  1978. }
  1979. end:
  1980. return status;
  1981. }
  1982. /**
  1983. * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
  1984. * @hw: pointer to the hardware structure
  1985. * @vsi_handle: driver VSI handle
  1986. *
  1987. * Replays filters for requested VSI via vsi_handle.
  1988. */
  1989. enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
  1990. {
  1991. struct ice_switch_info *sw = hw->switch_info;
  1992. enum ice_status status = 0;
  1993. u8 i;
  1994. for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
  1995. struct list_head *head;
  1996. head = &sw->recp_list[i].filt_replay_rules;
  1997. status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
  1998. if (status)
  1999. return status;
  2000. }
  2001. return status;
  2002. }
  2003. /**
  2004. * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
  2005. * @hw: pointer to the hw struct
  2006. *
  2007. * Deletes the filter replay rules.
  2008. */
  2009. void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
  2010. {
  2011. struct ice_switch_info *sw = hw->switch_info;
  2012. u8 i;
  2013. if (!sw)
  2014. return;
  2015. for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
  2016. if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
  2017. struct list_head *l_head;
  2018. l_head = &sw->recp_list[i].filt_replay_rules;
  2019. ice_rem_sw_rule_info(hw, l_head);
  2020. }
  2021. }
  2022. }