ice_switch.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_switch.h"

#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter first two bytes defines ether type (0x8100)
 *	and remaining two bytes are placeholder for programming a given VLAN id
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};
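
/* Editor's note (illustrative, not part of the driver): ice_fill_sw_rule()
 * below overwrites parts of this dummy header depending on the filter type.
 * Assuming a VLAN filter for VLAN ID 100, bytes 12-13 keep the 0x8100 ether
 * type and bytes 14-15 (ICE_ETH_VLAN_TCI_OFFSET) receive the VLAN ID;
 * assuming an Ethertype filter for, say, LLDP (0x88CC), bytes 12-13
 * (ICE_ETH_ETHTYPE_OFFSET) are rewritten with 0x88CC instead.
 */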
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
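
/* Editor's note (illustrative): these size macros compute the admin queue
 * buffer length for one switch rule element, i.e. the fixed descriptor part
 * plus the variant-specific payload. For example,
 * ICE_SW_RULE_VSI_LIST_SIZE(2) accounts for a vsi_list payload carrying two
 * VSI numbers, and ICE_SW_RULE_RX_TX_ETH_HDR_SIZE adds room for the 16-byte
 * dummy Ethernet header defined above.
 */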

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the hw struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
static enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
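
/* Editor's note: a minimal usage sketch (assumptions: allocating a single
 * generic resource element, error handling elided), mirroring how
 * ice_aq_alloc_free_vsi_list() below drives this helper:
 *
 *	struct ice_aqc_alloc_free_res_elem *sw_buf;
 *	u16 buf_len = sizeof(*sw_buf);
 *
 *	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
 *	sw_buf->num_elems = cpu_to_le16(1);
 *	sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
 *	ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res,
 *			      NULL);
 */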

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the hw struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
enum ice_status
ice_init_def_sw_recp(struct ice_hw *hw)
{
	struct ice_sw_recipe *recps;
	u8 i;

	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(struct ice_sw_recipe), GFP_KERNEL);
	if (!recps)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		mutex_init(&recps[i].filt_rule_lock);
	}

	hw->switch_info->recp_list = recps;

	return 0;
}

/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in 'buf'.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is an output-only parameter. It reflects the number of elements
 * in the response buffer. The caller should use *num_elems while parsing the
 * response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}
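
/* Editor's note: a minimal sketch of the paging protocol described above
 * (assumptions: rbuf already allocated, element parsing elided); the real
 * loop lives in ice_get_initial_sw_cfg() below:
 *
 *	u16 req_desc = 0, num_elems;
 *
 *	do {
 *		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
 *					   &req_desc, &num_elems, NULL);
 *	} while (!status && req_desc);
 */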

/**
 * ice_aq_add_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}

/**
 * ice_aq_free_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
static enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_aq_update_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
static enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_get_hw_vsi_num - return the hw VSI number
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 *
 * return the hw VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
}

/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
			     struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}

/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the hw struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (vsi) {
		devm_kfree(ice_hw_to_dev(hw), vsi);
		hw->vsi_ctx[vsi_handle] = NULL;
	}
}

/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the hw struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;

	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;

	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;

	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new vsi context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
			tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return status;
}
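
/* Editor's note: a minimal caller sketch (assumptions: vsi_ctx filled in by
 * the caller, handle 0 chosen arbitrarily, error handling elided):
 *
 *	struct ice_vsi_ctx ctx = { .alloc_from_pool = true };
 *
 *	status = ice_add_vsi(hw, 0, &ctx, NULL);
 *	(on success, ctx.vsi_num holds the HW VSI number mapped to handle 0)
 */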

/**
 * ice_free_vsi - free VSI context from hardware and VSI handle list
 * @hw: pointer to the hw struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
enum ice_status
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	if (!status)
		ice_clear_vsi_ctx(hw, vsi_handle);
	return status;
}

/**
 * ice_update_vsi
 * @hw: pointer to the hw struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
enum ice_status
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}

/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the hw struct
 * @vsi_list_id: VSI list id returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or frees a VSI list resource
 */
static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	enum ice_status status;
	u16 buf_len;

	buf_len = sizeof(*sw_buf);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = cpu_to_le16(1);

	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = ICE_ERR_PARAM;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the hw struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
static enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
}

/* ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
		break;
	}
}

/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	u16 req_desc = 0;
	u16 num_elems;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0; i < num_elems; i++) {
			struct ice_aqc_get_sw_cfg_resp_elem *ele;
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 type;

			ele = rbuf[i].elements;
			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			type = le16_to_cpu(ele->vsi_port_num) >>
				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;

			if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
	return status;
}

/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @f_info: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	f_info->lb_en = false;
	f_info->lan_en = false;
	if ((f_info->flag & ICE_FLTR_TX) &&
	    (f_info->fltr_act == ICE_FWD_TO_VSI ||
	     f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     f_info->fltr_act == ICE_FWD_TO_Q ||
	     f_info->fltr_act == ICE_FWD_TO_QGRP)) {
		f_info->lb_en = true;
		if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
		      is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
			f_info->lan_en = true;
	}
}
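
/* Editor's note (illustrative): for a Tx forwarding filter on a VEB switch
 * (hw->evb_veb) whose lookup is a unicast MAC, the logic above sets
 * lb_en = true and leaves lan_en = false, i.e. traffic is looped back
 * internally rather than also sent out on the wire; any other Tx forwarding
 * filter gets both lb_en and lan_en set.
 */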

/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}

/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource id
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile id
	 * 3. GENERIC VALUE action to hold the software marker id
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up tx rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup tx rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action id */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule id of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}

/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list id generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list id to VSI mapping
 * using the given VSI list id
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		set_bit(vsi_handle_arr[i], v_map->vsi_map);

	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}

/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list id generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list id
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = cpu_to_le16(type);
	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the hw struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: stores the ID of the VSI list to be created
 * @lkup_type: switch rule filter's lookup type
 */
static enum ice_status
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
	enum ice_status status;

	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
					    ice_aqc_opc_alloc_res);
	if (status)
		return status;

	/* Update the newly created VSI list to include the specified VSIs */
	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
					*vsi_list_id, false,
					ice_aqc_opc_add_sw_rules, lkup_type);
}
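
/* Editor's note: an illustrative call (hypothetical handles h0/h1, error
 * handling elided) showing the two-step sequence above: allocate the VSI
 * list resource, then program both VSIs into it:
 *
 *	u16 vsi_handle_arr[2] = { h0, h1 };
 *	u16 vsi_list_id;
 *
 *	status = ice_create_vsi_list_rule(hw, vsi_handle_arr, 2, &vsi_list_id,
 *					  ICE_SW_LKUP_MAC);
 */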

/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_info: filter information for switch rule
 *
 * Call AQ command to update a previously created switch rule with a
 * VSI list id
 */
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);

	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);

	/* Update switch rule with new rule set to forward VSI list */
	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_update_sw_rules, NULL);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the hw struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below:
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list id
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = 0;
	u16 vsi_list_id = 0;

	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI id passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI id */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
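
/* Editor's note (illustrative): assume VSI handle 3 already has a "forward
 * MAC X to VSI" rule and VSI handle 5 then subscribes to the same MAC. The
 * first branch above allocates a two-entry VSI list {3, 5}, rewrites the
 * existing rule to ICE_FWD_TO_VSI_LIST, and records the list in
 * m_entry->vsi_list_info; a third subscriber would take only the second
 * branch and simply be added to the existing list.
 */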

/**
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
			ret = list_itr;
			break;
		}
	}
	return ret;
}

/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list id found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
			map_info = list_itr->vsi_list_info;
			if (test_bit(vsi_handle, map_info->vsi_map)) {
				*vsi_list_id = map_info->vsi_list_id;
				return map_info;
			}
		}
	}
	return NULL;
}

/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe id) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}
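
/* Editor's note: a minimal sketch of how a caller (e.g. an "add MAC filter"
 * path, hypothetical mac/vsi_handle values) would feed this helper; fields
 * not shown are assumed zeroed:
 *
 *	struct ice_fltr_list_entry f_entry = { 0 };
 *
 *	f_entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	f_entry.fltr_info.vsi_handle = vsi_handle;
 *	f_entry.fltr_info.flag = ICE_FLTR_TX;
 *	ether_addr_copy(f_entry.fltr_info.l_data.mac.mac_addr, mac);
 *	status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC, &f_entry);
 */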
  1177. /**
  1178. * ice_remove_vsi_list_rule
  1179. * @hw: pointer to the hardware structure
  1180. * @vsi_list_id: VSI list id generated as part of allocate resource
  1181. * @lkup_type: switch rule filter lookup type
  1182. *
  1183. * The VSI list should be emptied before this function is called to remove the
  1184. * VSI list.
  1185. */
  1186. static enum ice_status
  1187. ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
  1188. enum ice_sw_lkup_type lkup_type)
  1189. {
  1190. struct ice_aqc_sw_rules_elem *s_rule;
  1191. enum ice_status status;
  1192. u16 s_rule_size;
  1193. s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
  1194. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  1195. if (!s_rule)
  1196. return ICE_ERR_NO_MEMORY;
  1197. s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
  1198. s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
  1199. /* Free the vsi_list resource that we allocated. It is assumed that the
  1200. * list is empty at this point.
  1201. */
  1202. status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
  1203. ice_aqc_opc_free_res);
  1204. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1205. return status;
  1206. }

/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
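
	/* Check whether the VSI list can now be torn down: a single remaining
	 * VSI for non-VLAN rules, or an empty list for VLAN rules (VLAN rules
	 * are always built on a VSI list, see ice_add_vlan_internal()).
	 */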
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status)
			return status;

		/* Change the list entry action from VSI_LIST to VSI */
		fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
		fm_list->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->fltr_info.vsi_handle = rem_vsi_handle;

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe id for which the rule needs to be removed
 * @f_entry: rule entry containing filter information
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}
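
	/* Rules that do not forward to a VSI list are removed outright. For a
	 * VSI-list rule, only this VSI is pulled off the list here; the rule
	 * itself is deleted below once no VSIs remain on it.
	 */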
	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else {
		if (list_elem->vsi_list_info->ref_cnt > 1)
			list_elem->vsi_list_info->ref_cnt--;
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		if (status)
			goto exit;

		/* Remove the bookkeeping entry from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);
		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case; removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
enum ice_status
ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 elem_sent, total_elem_left;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u16 s_rule_size;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}
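
	/* The loop above validated every entry and already programmed the
	 * multicast (and shared unicast) addresses one rule at a time. What
	 * remains is to program the num_unicast exclusive unicast addresses
	 * as a single bulk of switch rules.
	 */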
	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule id based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The bookkeeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */
			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
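
/* Illustrative caller sketch (not part of this file): build a one-entry
 * m_list and hand it to ice_add_mac(). Field choices mirror the checks
 * made above; vsi_handle and mac are placeholders supplied by the caller.
 *
 *	struct ice_fltr_list_entry *entry;
 *	LIST_HEAD(m_list);
 *
 *	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 *	if (!entry)
 *		return ICE_ERR_NO_MEMORY;
 *	entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry->fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry->fltr_info.vsi_handle = vsi_handle;
 *	ether_addr_copy(entry->fltr_info.l_data.mac.mac_addr, mac);
 *	list_add(&entry->list_entry, &m_list);
 *	status = ice_add_mac(hw, &m_list);
 */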

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN id should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing the VSI that
			 * we want to add. If found, use the same vsi_list_id
			 * for this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update the existing VSI list to add the new VSI id only if
		 * it is used by exactly one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* The VLAN rule exists and its VSI list is referenced by more
		 * than one VLAN rule: create a new VSI list holding the
		 * previous VSI plus the new VSI, and update the existing VLAN
		 * rule to point to the new VSI list id.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* The current implementation only supports reusing a VSI list
		 * with a single VSI count. We should never hit the condition
		 * below.
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW,
				  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to point to the new VSI list
		 * which includes the current VSI that is being requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* Before overriding the VSI list map info, decrement ref_cnt
		 * of the previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}
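
/* Illustrative caller sketch (not part of this file): each v_list entry
 * uses the ICE_SW_LKUP_VLAN lookup type and carries the 12-bit VLAN ID in
 * l_data.vlan.vlan_id; vsi_handle and vid are caller-supplied placeholders.
 *
 *	entry->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
 *	entry->fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry->fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry->fltr_info.vsi_handle = vsi_handle;
 *	entry->fltr_info.l_data.vlan.vlan_id = vid;
 *	list_add(&entry->list_entry, &v_list);
 *	status = ice_add_vlan(hw, &v_list);
 */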

/**
 * ice_rem_sw_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 */
static void
ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	if (!list_empty(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
			list_del(&entry->list_entry);
			devm_kfree(ice_hw_to_dev(hw), entry);
		}
	}
}

/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
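
	/* Bookkeeping in port_info: remember which rule ID and VSI are the
	 * current defaults for this direction when setting, or reset them to
	 * the invalid markers once the default rule has been removed.
	 */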
	if (set) {
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
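
/* Example (illustrative only): make vsi_handle the default RX VSI for this
 * switch, and clear it again later.
 *
 *	status = ice_cfg_dflt_vsi(hw, vsi_handle, true, ICE_FLTR_RX);
 *	...
 *	status = ice_cfg_dflt_vsi(hw, vsi_handle, false, ICE_FLTR_RX);
 */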

/**
 * ice_remove_mac - remove a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
 */
enum ice_status
ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_fltr_list_entry *list_itr;

	if (!m_list)
		return ICE_ERR_PARAM;

	list_for_each_entry(list_itr, m_list, list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;
		list_itr->status = ice_remove_rule_internal(hw,
							    ICE_SW_LKUP_MAC,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return 0;
}

/**
 * ice_remove_vlan - Remove VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->status = ice_remove_rule_internal(hw,
							      ICE_SW_LKUP_VLAN,
							      v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
 * @fm_entry: filter entry to inspect
 * @vsi_handle: VSI handle to compare with filter info
 */
static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
{
	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
}

/**
 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @vsi_list_head: pointer to the list to add entry to
 * @fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the fltr from, and pass on that information.
 */
static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct list_head *vsi_list_head,
			       struct ice_fltr_info *fi)
{
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_add(&tmp->list_entry, vsi_list_head);

	return 0;
}

/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with the list.
 */
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct list_head *lkup_list_head,
			 struct list_head *vsi_list_head)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;

	/* check to make sure VSI id is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
		struct ice_fltr_info *fi;

		fi = &fm_entry->fltr_info;
		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							vsi_list_head, fi);
		if (status)
			return status;
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		return;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_PROMISC_VLAN:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}

/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}

/**
 * ice_replay_fltr - Replay all the filters stored by a specific list head
 * @hw: pointer to the hardware structure
 * @list_head: list for which filters need to be replayed
 * @recp_id: Recipe id for which rules need to be replayed
 */
static enum ice_status
ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head l_head;
	enum ice_status status = 0;

	if (list_empty(list_head))
		return status;

	/* Move entries from the given list_head to a temporary l_head so that
	 * they can be replayed. Otherwise, when trying to re-add the same
	 * filter, the function would return "already exists".
	 */
	list_replace_init(list_head, &l_head);

	/* Mark the given list_head empty by reinitializing it so filters
	 * could be added again by *handler
	 */
	list_for_each_entry(itr, &l_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}

		/* Add a filter per VSI separately */
		while (1) {
			u16 vsi;

			vsi = find_first_bit(itr->vsi_list_info->vsi_map,
					     ICE_MAX_VSI);
			if (vsi == ICE_MAX_VSI)
				break;

			clear_bit(vsi, itr->vsi_list_info->vsi_map);
			f_entry.fltr_info.fwd_id.hw_vsi_id = vsi;
			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
			if (recp_id == ICE_SW_LKUP_VLAN)
				status = ice_add_vlan_internal(hw, &f_entry);
			else
				status = ice_add_rule_internal(hw, recp_id,
							       &f_entry);
			if (status)
				goto end;
		}
	}
end:
	/* Clear the filter management list */
	ice_rem_sw_rule_info(hw, &l_head);
	return status;
}

/**
 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
 * @hw: pointer to the hardware structure
 *
 * NOTE: This function does not clean up partially added filters on error.
 * It is up to caller of the function to issue a reset or fail early.
 */
enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct list_head *head = &sw->recp_list[i].filt_rules;

		status = ice_replay_fltr(hw, i, head);
		if (status)
			return status;
	}
	return status;
}
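
/* Illustrative usage (a sketch, not taken from this file): replay the
 * bookkeeping lists after switch state has been lost, e.g. as part of a
 * reset/rebuild sequence, and escalate on failure as the NOTE above
 * requires; err_rebuild is a placeholder label.
 *
 *	status = ice_replay_all_fltr(hw);
 *	if (status)
 *		goto err_rebuild;
 */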