  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. #include "ice_switch.h"
  4. #define ICE_ETH_DA_OFFSET 0
  5. #define ICE_ETH_ETHTYPE_OFFSET 12
  6. #define ICE_ETH_VLAN_TCI_OFFSET 14
  7. #define ICE_MAX_VLAN_ID 0xFFF
  8. /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
  9. * struct to configure any switch filter rules.
  10. * {DA (6 bytes), SA(6 bytes),
  11. * Ether type (2 bytes for header without VLAN tag) OR
  12. * VLAN tag (4 bytes for header with VLAN tag) }
  13. *
  14. * Word on Hardcoded values
  15. * byte 0 = 0x2: to identify it as locally administered DA MAC
  16. * byte 6 = 0x2: to identify it as locally administered SA MAC
  17. * byte 12 = 0x81 & byte 13 = 0x00:
  18. * In case of VLAN filter first two bytes defines ether type (0x8100)
  19. * and remaining two bytes are placeholder for programming a given VLAN id
  20. * In case of Ether type filter it is treated as header without VLAN tag
  21. * and byte 12 and 13 is used to program a given Ether type instead
  22. */
  23. #define DUMMY_ETH_HDR_LEN 16
  24. static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
  25. 0x2, 0, 0, 0, 0, 0,
  26. 0x81, 0, 0, 0};
  27. #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
  28. (sizeof(struct ice_aqc_sw_rules_elem) - \
  29. sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
  30. sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
  31. #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
  32. (sizeof(struct ice_aqc_sw_rules_elem) - \
  33. sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
  34. sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
  35. #define ICE_SW_RULE_LG_ACT_SIZE(n) \
  36. (sizeof(struct ice_aqc_sw_rules_elem) - \
  37. sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
  38. sizeof(struct ice_sw_rule_lg_act) - \
  39. sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
  40. ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
  41. #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
  42. (sizeof(struct ice_aqc_sw_rules_elem) - \
  43. sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
  44. sizeof(struct ice_sw_rule_vsi_list) - \
  45. sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
  46. ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
  47. /**
  48. * ice_aq_alloc_free_res - command to allocate/free resources
  49. * @hw: pointer to the hw struct
  50. * @num_entries: number of resource entries in buffer
  51. * @buf: Indirect buffer to hold data parameters and response
  52. * @buf_size: size of buffer for indirect commands
  53. * @opc: pass in the command opcode
  54. * @cd: pointer to command details structure or NULL
  55. *
  56. * Helper function to allocate/free resources using the admin queue commands
  57. */
  58. static enum ice_status
  59. ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
  60. struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
  61. enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  62. {
  63. struct ice_aqc_alloc_free_res_cmd *cmd;
  64. struct ice_aq_desc desc;
  65. cmd = &desc.params.sw_res_ctrl;
  66. if (!buf)
  67. return ICE_ERR_PARAM;
  68. if (buf_size < (num_entries * sizeof(buf->elem[0])))
  69. return ICE_ERR_PARAM;
  70. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  71. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  72. cmd->num_entries = cpu_to_le16(num_entries);
  73. return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  74. }
  75. /**
  76. * ice_aq_get_sw_cfg - get switch configuration
  77. * @hw: pointer to the hardware structure
  78. * @buf: pointer to the result buffer
  79. * @buf_size: length of the buffer available for response
  80. * @req_desc: pointer to requested descriptor
  81. * @num_elems: pointer to number of elements
  82. * @cd: pointer to command details structure or NULL
  83. *
  84. * Get switch configuration (0x0200) to be placed in 'buff'.
  85. * This admin command returns information such as initial VSI/port number
  86. * and switch ID it belongs to.
  87. *
  88. * NOTE: *req_desc is both an input/output parameter.
  89. * The caller of this function first calls this function with *request_desc set
  90. * to 0. If the response from f/w has *req_desc set to 0, all the switch
  91. * configuration information has been returned; if non-zero (meaning not all
  92. * the information was returned), the caller should call this function again
  93. * with *req_desc set to the previous value returned by f/w to get the
  94. * next block of switch configuration information.
  95. *
  96. * *num_elems is output only parameter. This reflects the number of elements
  97. * in response buffer. The caller of this function to use *num_elems while
  98. * parsing the response buffer.
  99. */
  100. static enum ice_status
  101. ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
  102. u16 buf_size, u16 *req_desc, u16 *num_elems,
  103. struct ice_sq_cd *cd)
  104. {
  105. struct ice_aqc_get_sw_cfg *cmd;
  106. enum ice_status status;
  107. struct ice_aq_desc desc;
  108. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
  109. cmd = &desc.params.get_sw_conf;
  110. cmd->element = cpu_to_le16(*req_desc);
  111. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  112. if (!status) {
  113. *req_desc = le16_to_cpu(cmd->element);
  114. *num_elems = le16_to_cpu(cmd->num_elems);
  115. }
  116. return status;
  117. }
  118. /**
  119. * ice_aq_add_vsi
  120. * @hw: pointer to the hw struct
  121. * @vsi_ctx: pointer to a VSI context struct
  122. * @cd: pointer to command details structure or NULL
  123. *
  124. * Add a VSI context to the hardware (0x0210)
  125. */
  126. enum ice_status
  127. ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  128. struct ice_sq_cd *cd)
  129. {
  130. struct ice_aqc_add_update_free_vsi_resp *res;
  131. struct ice_aqc_add_get_update_free_vsi *cmd;
  132. enum ice_status status;
  133. struct ice_aq_desc desc;
  134. cmd = &desc.params.vsi_cmd;
  135. res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
  136. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
  137. if (!vsi_ctx->alloc_from_pool)
  138. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
  139. ICE_AQ_VSI_IS_VALID);
  140. cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
  141. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  142. status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
  143. sizeof(vsi_ctx->info), cd);
  144. if (!status) {
  145. vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
  146. vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
  147. vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
  148. }
  149. return status;
  150. }
  151. /**
  152. * ice_aq_update_vsi
  153. * @hw: pointer to the hw struct
  154. * @vsi_ctx: pointer to a VSI context struct
  155. * @cd: pointer to command details structure or NULL
  156. *
  157. * Update VSI context in the hardware (0x0211)
  158. */
  159. enum ice_status
  160. ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  161. struct ice_sq_cd *cd)
  162. {
  163. struct ice_aqc_add_update_free_vsi_resp *resp;
  164. struct ice_aqc_add_get_update_free_vsi *cmd;
  165. struct ice_aq_desc desc;
  166. enum ice_status status;
  167. cmd = &desc.params.vsi_cmd;
  168. resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
  169. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
  170. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
  171. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  172. status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
  173. sizeof(vsi_ctx->info), cd);
  174. if (!status) {
  175. vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
  176. vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
  177. }
  178. return status;
  179. }
  180. /**
  181. * ice_aq_free_vsi
  182. * @hw: pointer to the hw struct
  183. * @vsi_ctx: pointer to a VSI context struct
  184. * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
  185. * @cd: pointer to command details structure or NULL
  186. *
  187. * Get VSI context info from hardware (0x0213)
  188. */
  189. enum ice_status
  190. ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  191. bool keep_vsi_alloc, struct ice_sq_cd *cd)
  192. {
  193. struct ice_aqc_add_update_free_vsi_resp *resp;
  194. struct ice_aqc_add_get_update_free_vsi *cmd;
  195. struct ice_aq_desc desc;
  196. enum ice_status status;
  197. cmd = &desc.params.vsi_cmd;
  198. resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
  199. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
  200. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
  201. if (keep_vsi_alloc)
  202. cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
  203. status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
  204. if (!status) {
  205. vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
  206. vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
  207. }
  208. return status;
  209. }
  210. /**
  211. * ice_aq_alloc_free_vsi_list
  212. * @hw: pointer to the hw struct
  213. * @vsi_list_id: VSI list id returned or used for lookup
  214. * @lkup_type: switch rule filter lookup type
  215. * @opc: switch rules population command type - pass in the command opcode
  216. *
  217. * allocates or free a VSI list resource
  218. */
  219. static enum ice_status
  220. ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
  221. enum ice_sw_lkup_type lkup_type,
  222. enum ice_adminq_opc opc)
  223. {
  224. struct ice_aqc_alloc_free_res_elem *sw_buf;
  225. struct ice_aqc_res_elem *vsi_ele;
  226. enum ice_status status;
  227. u16 buf_len;
  228. buf_len = sizeof(*sw_buf);
  229. sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
  230. if (!sw_buf)
  231. return ICE_ERR_NO_MEMORY;
  232. sw_buf->num_elems = cpu_to_le16(1);
  233. if (lkup_type == ICE_SW_LKUP_MAC ||
  234. lkup_type == ICE_SW_LKUP_MAC_VLAN ||
  235. lkup_type == ICE_SW_LKUP_ETHERTYPE ||
  236. lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
  237. lkup_type == ICE_SW_LKUP_PROMISC ||
  238. lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
  239. sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
  240. } else if (lkup_type == ICE_SW_LKUP_VLAN) {
  241. sw_buf->res_type =
  242. cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
  243. } else {
  244. status = ICE_ERR_PARAM;
  245. goto ice_aq_alloc_free_vsi_list_exit;
  246. }
  247. if (opc == ice_aqc_opc_free_res)
  248. sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
  249. status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
  250. if (status)
  251. goto ice_aq_alloc_free_vsi_list_exit;
  252. if (opc == ice_aqc_opc_alloc_res) {
  253. vsi_ele = &sw_buf->elem[0];
  254. *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
  255. }
  256. ice_aq_alloc_free_vsi_list_exit:
  257. devm_kfree(ice_hw_to_dev(hw), sw_buf);
  258. return status;
  259. }
  260. /**
  261. * ice_aq_sw_rules - add/update/remove switch rules
  262. * @hw: pointer to the hw struct
  263. * @rule_list: pointer to switch rule population list
  264. * @rule_list_sz: total size of the rule list in bytes
  265. * @num_rules: number of switch rules in the rule_list
  266. * @opc: switch rules population command type - pass in the command opcode
  267. * @cd: pointer to command details structure or NULL
  268. *
  269. * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
  270. */
  271. static enum ice_status
  272. ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
  273. u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  274. {
  275. struct ice_aq_desc desc;
  276. if (opc != ice_aqc_opc_add_sw_rules &&
  277. opc != ice_aqc_opc_update_sw_rules &&
  278. opc != ice_aqc_opc_remove_sw_rules)
  279. return ICE_ERR_PARAM;
  280. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  281. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  282. desc.params.sw_rules.num_rules_fltr_entry_index =
  283. cpu_to_le16(num_rules);
  284. return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
  285. }
  286. /* ice_init_port_info - Initialize port_info with switch configuration data
  287. * @pi: pointer to port_info
  288. * @vsi_port_num: VSI number or port number
  289. * @type: Type of switch element (port or VSI)
  290. * @swid: switch ID of the switch the element is attached to
  291. * @pf_vf_num: PF or VF number
  292. * @is_vf: true if the element is a VF, false otherwise
  293. */
  294. static void
  295. ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
  296. u16 swid, u16 pf_vf_num, bool is_vf)
  297. {
  298. switch (type) {
  299. case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
  300. pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
  301. pi->sw_id = swid;
  302. pi->pf_vf_num = pf_vf_num;
  303. pi->is_vf = is_vf;
  304. pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
  305. pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
  306. break;
  307. default:
  308. ice_debug(pi->hw, ICE_DBG_SW,
  309. "incorrect VSI/port type received\n");
  310. break;
  311. }
  312. }
  313. /* ice_get_initial_sw_cfg - Get initial port and default VSI data
  314. * @hw: pointer to the hardware structure
  315. */
  316. enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
  317. {
  318. struct ice_aqc_get_sw_cfg_resp *rbuf;
  319. enum ice_status status;
  320. u16 req_desc = 0;
  321. u16 num_elems;
  322. u16 i;
  323. rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
  324. GFP_KERNEL);
  325. if (!rbuf)
  326. return ICE_ERR_NO_MEMORY;
  327. /* Multiple calls to ice_aq_get_sw_cfg may be required
  328. * to get all the switch configuration information. The need
  329. * for additional calls is indicated by ice_aq_get_sw_cfg
  330. * writing a non-zero value in req_desc
  331. */
  332. do {
  333. status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
  334. &req_desc, &num_elems, NULL);
  335. if (status)
  336. break;
  337. for (i = 0; i < num_elems; i++) {
  338. struct ice_aqc_get_sw_cfg_resp_elem *ele;
  339. u16 pf_vf_num, swid, vsi_port_num;
  340. bool is_vf = false;
  341. u8 type;
  342. ele = rbuf[i].elements;
  343. vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
  344. ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
  345. pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
  346. ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
  347. swid = le16_to_cpu(ele->swid);
  348. if (le16_to_cpu(ele->pf_vf_num) &
  349. ICE_AQC_GET_SW_CONF_RESP_IS_VF)
  350. is_vf = true;
  351. type = le16_to_cpu(ele->vsi_port_num) >>
  352. ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
  353. if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
  354. /* FW VSI is not needed. Just continue. */
  355. continue;
  356. }
  357. ice_init_port_info(hw->port_info, vsi_port_num,
  358. type, swid, pf_vf_num, is_vf);
  359. }
  360. } while (req_desc && !status);
  361. devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
  362. return status;
  363. }
  364. /**
  365. * ice_fill_sw_info - Helper function to populate lb_en and lan_en
  366. * @hw: pointer to the hardware structure
  367. * @f_info: filter info structure to fill/update
  368. *
  369. * This helper function populates the lb_en and lan_en elements of the provided
  370. * ice_fltr_info struct using the switch's type and characteristics of the
  371. * switch rule being configured.
  372. */
  373. static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
  374. {
  375. f_info->lb_en = false;
  376. f_info->lan_en = false;
  377. if ((f_info->flag & ICE_FLTR_TX) &&
  378. (f_info->fltr_act == ICE_FWD_TO_VSI ||
  379. f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
  380. f_info->fltr_act == ICE_FWD_TO_Q ||
  381. f_info->fltr_act == ICE_FWD_TO_QGRP)) {
  382. f_info->lb_en = true;
  383. if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
  384. is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
  385. f_info->lan_en = true;
  386. }
  387. }
  388. /**
  389. * ice_fill_sw_rule - Helper function to fill switch rule structure
  390. * @hw: pointer to the hardware structure
  391. * @f_info: entry containing packet forwarding information
  392. * @s_rule: switch rule structure to be filled in based on mac_entry
  393. * @opc: switch rules population command type - pass in the command opcode
  394. */
  395. static void
  396. ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
  397. struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
  398. {
  399. u16 vlan_id = ICE_MAX_VLAN_ID + 1;
  400. u8 eth_hdr[DUMMY_ETH_HDR_LEN];
  401. void *daddr = NULL;
  402. u32 act = 0;
  403. __be16 *off;
  404. if (opc == ice_aqc_opc_remove_sw_rules) {
  405. s_rule->pdata.lkup_tx_rx.act = 0;
  406. s_rule->pdata.lkup_tx_rx.index =
  407. cpu_to_le16(f_info->fltr_rule_id);
  408. s_rule->pdata.lkup_tx_rx.hdr_len = 0;
  409. return;
  410. }
  411. /* initialize the ether header with a dummy header */
  412. memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header));
  413. ice_fill_sw_info(hw, f_info);
  414. switch (f_info->fltr_act) {
  415. case ICE_FWD_TO_VSI:
  416. act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
  417. ICE_SINGLE_ACT_VSI_ID_M;
  418. if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
  419. act |= ICE_SINGLE_ACT_VSI_FORWARDING |
  420. ICE_SINGLE_ACT_VALID_BIT;
  421. break;
  422. case ICE_FWD_TO_VSI_LIST:
  423. act |= ICE_SINGLE_ACT_VSI_LIST;
  424. act |= (f_info->fwd_id.vsi_list_id <<
  425. ICE_SINGLE_ACT_VSI_LIST_ID_S) &
  426. ICE_SINGLE_ACT_VSI_LIST_ID_M;
  427. if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
  428. act |= ICE_SINGLE_ACT_VSI_FORWARDING |
  429. ICE_SINGLE_ACT_VALID_BIT;
  430. break;
  431. case ICE_FWD_TO_Q:
  432. act |= ICE_SINGLE_ACT_TO_Q;
  433. act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
  434. ICE_SINGLE_ACT_Q_INDEX_M;
  435. break;
  436. case ICE_FWD_TO_QGRP:
  437. act |= ICE_SINGLE_ACT_TO_Q;
  438. act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
  439. ICE_SINGLE_ACT_Q_REGION_M;
  440. break;
  441. case ICE_DROP_PACKET:
  442. act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
  443. break;
  444. default:
  445. return;
  446. }
  447. if (f_info->lb_en)
  448. act |= ICE_SINGLE_ACT_LB_ENABLE;
  449. if (f_info->lan_en)
  450. act |= ICE_SINGLE_ACT_LAN_ENABLE;
  451. switch (f_info->lkup_type) {
  452. case ICE_SW_LKUP_MAC:
  453. daddr = f_info->l_data.mac.mac_addr;
  454. break;
  455. case ICE_SW_LKUP_VLAN:
  456. vlan_id = f_info->l_data.vlan.vlan_id;
  457. if (f_info->fltr_act == ICE_FWD_TO_VSI ||
  458. f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
  459. act |= ICE_SINGLE_ACT_PRUNE;
  460. act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
  461. }
  462. break;
  463. case ICE_SW_LKUP_ETHERTYPE_MAC:
  464. daddr = f_info->l_data.ethertype_mac.mac_addr;
  465. /* fall-through */
  466. case ICE_SW_LKUP_ETHERTYPE:
  467. off = (__be16 *)&eth_hdr[ICE_ETH_ETHTYPE_OFFSET];
  468. *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
  469. break;
  470. case ICE_SW_LKUP_MAC_VLAN:
  471. daddr = f_info->l_data.mac_vlan.mac_addr;
  472. vlan_id = f_info->l_data.mac_vlan.vlan_id;
  473. break;
  474. case ICE_SW_LKUP_PROMISC_VLAN:
  475. vlan_id = f_info->l_data.mac_vlan.vlan_id;
  476. /* fall-through */
  477. case ICE_SW_LKUP_PROMISC:
  478. daddr = f_info->l_data.mac_vlan.mac_addr;
  479. break;
  480. default:
  481. break;
  482. }
  483. s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
  484. cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
  485. cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
  486. /* Recipe set depending on lookup type */
  487. s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
  488. s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
  489. s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
  490. if (daddr)
  491. ether_addr_copy(&eth_hdr[ICE_ETH_DA_OFFSET], daddr);
  492. if (!(vlan_id > ICE_MAX_VLAN_ID)) {
  493. off = (__be16 *)&eth_hdr[ICE_ETH_VLAN_TCI_OFFSET];
  494. *off = cpu_to_be16(vlan_id);
  495. }
  496. /* Create the switch rule with the final dummy Ethernet header */
  497. if (opc != ice_aqc_opc_update_sw_rules)
  498. s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr));
  499. memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr));
  500. }
  501. /**
  502. * ice_add_marker_act
  503. * @hw: pointer to the hardware structure
  504. * @m_ent: the management entry for which sw marker needs to be added
  505. * @sw_marker: sw marker to tag the Rx descriptor with
  506. * @l_id: large action resource id
  507. *
  508. * Create a large action to hold software marker and update the switch rule
  509. * entry pointed by m_ent with newly created large action
  510. */
  511. static enum ice_status
  512. ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
  513. u16 sw_marker, u16 l_id)
  514. {
  515. struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
  516. /* For software marker we need 3 large actions
  517. * 1. FWD action: FWD TO VSI or VSI LIST
  518. * 2. GENERIC VALUE action to hold the profile id
  519. * 3. GENERIC VALUE action to hold the software marker id
  520. */
  521. const u16 num_lg_acts = 3;
  522. enum ice_status status;
  523. u16 lg_act_size;
  524. u16 rules_size;
  525. u16 vsi_info;
  526. u32 act;
  527. if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
  528. return ICE_ERR_PARAM;
  529. /* Create two back-to-back switch rules and submit them to the HW using
  530. * one memory buffer:
  531. * 1. Large Action
  532. * 2. Look up tx rx
  533. */
  534. lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
  535. rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
  536. lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
  537. if (!lg_act)
  538. return ICE_ERR_NO_MEMORY;
  539. rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
  540. /* Fill in the first switch rule i.e. large action */
  541. lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
  542. lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
  543. lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
  544. /* First action VSI forwarding or VSI list forwarding depending on how
  545. * many VSIs
  546. */
  547. vsi_info = (m_ent->vsi_count > 1) ?
  548. m_ent->fltr_info.fwd_id.vsi_list_id :
  549. m_ent->fltr_info.fwd_id.vsi_id;
  550. act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
  551. act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) &
  552. ICE_LG_ACT_VSI_LIST_ID_M;
  553. if (m_ent->vsi_count > 1)
  554. act |= ICE_LG_ACT_VSI_LIST;
  555. lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
  556. /* Second action descriptor type */
  557. act = ICE_LG_ACT_GENERIC;
  558. act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
  559. lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
  560. act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
  561. ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
  562. /* Third action Marker value */
  563. act |= ICE_LG_ACT_GENERIC;
  564. act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
  565. ICE_LG_ACT_GENERIC_VALUE_M;
  566. lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
  567. /* call the fill switch rule to fill the lookup tx rx structure */
  568. ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
  569. ice_aqc_opc_update_sw_rules);
  570. /* Update the action to point to the large action id */
  571. rx_tx->pdata.lkup_tx_rx.act =
  572. cpu_to_le32(ICE_SINGLE_ACT_PTR |
  573. ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
  574. ICE_SINGLE_ACT_PTR_VAL_M));
  575. /* Use the filter rule id of the previously created rule with single
  576. * act. Once the update happens, hardware will treat this as large
  577. * action
  578. */
  579. rx_tx->pdata.lkup_tx_rx.index =
  580. cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
  581. status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
  582. ice_aqc_opc_update_sw_rules, NULL);
  583. if (!status) {
  584. m_ent->lg_act_idx = l_id;
  585. m_ent->sw_marker_id = sw_marker;
  586. }
  587. devm_kfree(ice_hw_to_dev(hw), lg_act);
  588. return status;
  589. }
  590. /**
  591. * ice_create_vsi_list_map
  592. * @hw: pointer to the hardware structure
  593. * @vsi_array: array of VSIs to form a VSI list
  594. * @num_vsi: num VSI in the array
  595. * @vsi_list_id: VSI list id generated as part of allocate resource
  596. *
  597. * Helper function to create a new entry of VSI list id to VSI mapping
  598. * using the given VSI list id
  599. */
  600. static struct ice_vsi_list_map_info *
  601. ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
  602. u16 vsi_list_id)
  603. {
  604. struct ice_switch_info *sw = hw->switch_info;
  605. struct ice_vsi_list_map_info *v_map;
  606. int i;
  607. v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
  608. if (!v_map)
  609. return NULL;
  610. v_map->vsi_list_id = vsi_list_id;
  611. for (i = 0; i < num_vsi; i++)
  612. set_bit(vsi_array[i], v_map->vsi_map);
  613. list_add(&v_map->list_entry, &sw->vsi_list_map_head);
  614. return v_map;
  615. }
  616. /**
  617. * ice_update_vsi_list_rule
  618. * @hw: pointer to the hardware structure
  619. * @vsi_array: array of VSIs to form a VSI list
  620. * @num_vsi: num VSI in the array
  621. * @vsi_list_id: VSI list id generated as part of allocate resource
  622. * @remove: Boolean value to indicate if this is a remove action
  623. * @opc: switch rules population command type - pass in the command opcode
  624. * @lkup_type: lookup type of the filter
  625. *
  626. * Call AQ command to add a new switch rule or update existing switch rule
  627. * using the given VSI list id
  628. */
  629. static enum ice_status
  630. ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
  631. u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
  632. enum ice_sw_lkup_type lkup_type)
  633. {
  634. struct ice_aqc_sw_rules_elem *s_rule;
  635. enum ice_status status;
  636. u16 s_rule_size;
  637. u16 type;
  638. int i;
  639. if (!num_vsi)
  640. return ICE_ERR_PARAM;
  641. if (lkup_type == ICE_SW_LKUP_MAC ||
  642. lkup_type == ICE_SW_LKUP_MAC_VLAN ||
  643. lkup_type == ICE_SW_LKUP_ETHERTYPE ||
  644. lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
  645. lkup_type == ICE_SW_LKUP_PROMISC ||
  646. lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
  647. type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
  648. ICE_AQC_SW_RULES_T_VSI_LIST_SET;
  649. else if (lkup_type == ICE_SW_LKUP_VLAN)
  650. type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
  651. ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
  652. else
  653. return ICE_ERR_PARAM;
  654. s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
  655. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  656. if (!s_rule)
  657. return ICE_ERR_NO_MEMORY;
  658. for (i = 0; i < num_vsi; i++)
  659. s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]);
  660. s_rule->type = cpu_to_le16(type);
  661. s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
  662. s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
  663. status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
  664. devm_kfree(ice_hw_to_dev(hw), s_rule);
  665. return status;
  666. }
  667. /**
  668. * ice_create_vsi_list_rule - Creates and populates a VSI list rule
  669. * @hw: pointer to the hw struct
  670. * @vsi_array: array of VSIs to form a VSI list
  671. * @num_vsi: number of VSIs in the array
  672. * @vsi_list_id: stores the ID of the VSI list to be created
  673. * @lkup_type: switch rule filter's lookup type
  674. */
  675. static enum ice_status
  676. ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
  677. u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
  678. {
  679. enum ice_status status;
  680. int i;
  681. for (i = 0; i < num_vsi; i++)
  682. if (vsi_array[i] >= ICE_MAX_VSI)
  683. return ICE_ERR_OUT_OF_RANGE;
  684. status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
  685. ice_aqc_opc_alloc_res);
  686. if (status)
  687. return status;
  688. /* Update the newly created VSI list to include the specified VSIs */
  689. return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id,
  690. false, ice_aqc_opc_add_sw_rules,
  691. lkup_type);
  692. }
  693. /**
  694. * ice_create_pkt_fwd_rule
  695. * @hw: pointer to the hardware structure
  696. * @f_entry: entry containing packet forwarding information
  697. *
  698. * Create switch rule with given filter information and add an entry
  699. * to the corresponding filter management list to track this switch rule
  700. * and VSI mapping
  701. */
  702. static enum ice_status
  703. ice_create_pkt_fwd_rule(struct ice_hw *hw,
  704. struct ice_fltr_list_entry *f_entry)
  705. {
  706. struct ice_switch_info *sw = hw->switch_info;
  707. struct ice_fltr_mgmt_list_entry *fm_entry;
  708. struct ice_aqc_sw_rules_elem *s_rule;
  709. enum ice_sw_lkup_type l_type;
  710. enum ice_status status;
  711. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  712. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
  713. if (!s_rule)
  714. return ICE_ERR_NO_MEMORY;
  715. fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
  716. GFP_KERNEL);
  717. if (!fm_entry) {
  718. status = ICE_ERR_NO_MEMORY;
  719. goto ice_create_pkt_fwd_rule_exit;
  720. }
  721. fm_entry->fltr_info = f_entry->fltr_info;
  722. /* Initialize all the fields for the management entry */
  723. fm_entry->vsi_count = 1;
  724. fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
  725. fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
  726. fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
  727. ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
  728. ice_aqc_opc_add_sw_rules);
  729. status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
  730. ice_aqc_opc_add_sw_rules, NULL);
  731. if (status) {
  732. devm_kfree(ice_hw_to_dev(hw), fm_entry);
  733. goto ice_create_pkt_fwd_rule_exit;
  734. }
  735. f_entry->fltr_info.fltr_rule_id =
  736. le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  737. fm_entry->fltr_info.fltr_rule_id =
  738. le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  739. /* The book keeping entries will get removed when base driver
  740. * calls remove filter AQ command
  741. */
  742. l_type = fm_entry->fltr_info.lkup_type;
  743. if (l_type == ICE_SW_LKUP_MAC) {
  744. mutex_lock(&sw->mac_list_lock);
  745. list_add(&fm_entry->list_entry, &sw->mac_list_head);
  746. mutex_unlock(&sw->mac_list_lock);
  747. } else if (l_type == ICE_SW_LKUP_VLAN) {
  748. mutex_lock(&sw->vlan_list_lock);
  749. list_add(&fm_entry->list_entry, &sw->vlan_list_head);
  750. mutex_unlock(&sw->vlan_list_lock);
  751. } else if (l_type == ICE_SW_LKUP_ETHERTYPE ||
  752. l_type == ICE_SW_LKUP_ETHERTYPE_MAC) {
  753. mutex_lock(&sw->eth_m_list_lock);
  754. list_add(&fm_entry->list_entry, &sw->eth_m_list_head);
  755. mutex_unlock(&sw->eth_m_list_lock);
  756. } else if (l_type == ICE_SW_LKUP_PROMISC ||
  757. l_type == ICE_SW_LKUP_PROMISC_VLAN) {
  758. mutex_lock(&sw->promisc_list_lock);
  759. list_add(&fm_entry->list_entry, &sw->promisc_list_head);
  760. mutex_unlock(&sw->promisc_list_lock);
  761. } else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) {
  762. mutex_lock(&sw->mac_vlan_list_lock);
  763. list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head);
  764. mutex_unlock(&sw->mac_vlan_list_lock);
  765. } else {
  766. status = ICE_ERR_NOT_IMPL;
  767. }
  768. ice_create_pkt_fwd_rule_exit:
  769. devm_kfree(ice_hw_to_dev(hw), s_rule);
  770. return status;
  771. }
  772. /**
  773. * ice_update_pkt_fwd_rule
  774. * @hw: pointer to the hardware structure
  775. * @rule_id: rule of previously created switch rule to update
  776. * @vsi_list_id: VSI list id to be updated with
  777. * @f_info: ice_fltr_info to pull other information for switch rule
  778. *
  779. * Call AQ command to update a previously created switch rule with a
  780. * VSI list id
  781. */
  782. static enum ice_status
  783. ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
  784. struct ice_fltr_info f_info)
  785. {
  786. struct ice_aqc_sw_rules_elem *s_rule;
  787. struct ice_fltr_info tmp_fltr;
  788. enum ice_status status;
  789. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  790. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
  791. if (!s_rule)
  792. return ICE_ERR_NO_MEMORY;
  793. tmp_fltr = f_info;
  794. tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
  795. tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
  796. ice_fill_sw_rule(hw, &tmp_fltr, s_rule,
  797. ice_aqc_opc_update_sw_rules);
  798. s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
  799. /* Update switch rule with new rule set to forward VSI list */
  800. status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
  801. ice_aqc_opc_update_sw_rules, NULL);
  802. devm_kfree(ice_hw_to_dev(hw), s_rule);
  803. return status;
  804. }
/**
 * ice_handle_vsi_list_mgmt
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list id
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_handle_vsi_list_mgmt(struct ice_hw *hw,
			 struct ice_fltr_mgmt_list_entry *m_entry,
			 struct ice_fltr_info *cur_fltr,
			 struct ice_fltr_info *new_fltr)
{
	enum ice_status status = 0;
	u16 vsi_list_id = 0;
	/* Forwarding to a queue/queue-group cannot be combined through a
	 * VSI list, so these action mixes are rejected up front.
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;
	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		u16 vsi_id_arr[2];
		u16 fltr_rule;
		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id)
			return ICE_ERR_ALREADY_EXISTS;
		vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id;
		vsi_id_arr[1] = new_fltr->fwd_id.vsi_id;
		status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;
		fltr_rule = cur_fltr->fltr_rule_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id,
						 *new_fltr);
		if (status)
			return status;
		/* Record the new list-forwarding action in the bookkeeping
		 * entry and build the id->VSI bitmap for the new list.
		 */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2,
						vsi_list_id);
		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_id = new_fltr->fwd_id.vsi_id;
		enum ice_adminq_opc opcode;
		/* NOTE(review): this branch assumes vsi_list_info is non-NULL
		 * whenever vsi_count >= 2 — confirm no caller can reach here
		 * with vsi_count >= 2 and no list map.
		 */
		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map))
			return 0;
		/* Update the previously created VSI list set with
		 * the new VSI id passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;
		status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
						  false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI id */
		if (!status)
			set_bit(vsi_id, m_entry->vsi_list_info->vsi_map);
	}
	/* One more VSI now subscribes to this filter */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
  902. /**
  903. * ice_find_mac_entry
  904. * @hw: pointer to the hardware structure
  905. * @mac_addr: MAC address to search for
  906. *
  907. * Helper function to search for a MAC entry using a given MAC address
  908. * Returns pointer to the entry if found.
  909. */
  910. static struct ice_fltr_mgmt_list_entry *
  911. ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr)
  912. {
  913. struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL;
  914. struct ice_switch_info *sw = hw->switch_info;
  915. mutex_lock(&sw->mac_list_lock);
  916. list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) {
  917. u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
  918. if (ether_addr_equal(buf, mac_addr)) {
  919. mac_ret = m_list_itr;
  920. break;
  921. }
  922. }
  923. mutex_unlock(&sw->mac_list_lock);
  924. return mac_ret;
  925. }
  926. /**
  927. * ice_add_shared_mac - Add one MAC shared filter rule
  928. * @hw: pointer to the hardware structure
  929. * @f_entry: structure containing MAC forwarding information
  930. *
  931. * Adds or updates the book keeping list for the MAC addresses
  932. */
  933. static enum ice_status
  934. ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
  935. {
  936. struct ice_fltr_info *new_fltr, *cur_fltr;
  937. struct ice_fltr_mgmt_list_entry *m_entry;
  938. new_fltr = &f_entry->fltr_info;
  939. m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]);
  940. if (!m_entry)
  941. return ice_create_pkt_fwd_rule(hw, f_entry);
  942. cur_fltr = &m_entry->fltr_info;
  943. return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr);
  944. }
/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * Unicast addresses (when ucast_shared is false) are validated first, then
 * programmed in bulk via one or more add-switch-rules AQ calls; multicast
 * (and shared unicast) addresses are added one at a time through
 * ice_add_shared_mac() during the validation pass.
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
enum ice_status
ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	u16 elem_sent, total_elem_left;
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u16 s_rule_size;
	if (!m_list || !hw)
		return ICE_ERR_PARAM;
	/* Pass 1: validate entries, count unicast addresses for the bulk
	 * path, and add multicast/shared entries immediately.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;
		if (is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			if (ice_find_mac_entry(hw, add))
				return ICE_ERR_ALREADY_EXISTS;
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			status = ice_add_shared_mac(hw, m_list_itr);
			if (status) {
				m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
				return status;
			}
			m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
		}
	}
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast)
		return 0;
	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	/* Pass 2: serialize one switch rule per unicast address into the
	 * bulk buffer.
	 */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *addr = &f_info->l_data.mac.mac_addr[0];
		if (is_unicast_ether_addr(addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info,
					 r_iter, ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}
	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;
		/* Send as many rules as fit in one ICE_AQ_MAX_BUF_LEN
		 * buffer per AQ call.
		 */
		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}
	/* Fill up rule id based on the value returned from FW */
	r_iter = s_rule;
	/* Pass 3: read back firmware-assigned rule ids and create one
	 * bookkeeping entry per unicast address.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_switch_info *sw = hw->switch_info;
		struct ice_fltr_mgmt_list_entry *fm_entry;
		if (is_unicast_ether_addr(addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */
			mutex_lock(&sw->mac_list_lock);
			list_add(&fm_entry->list_entry, &sw->mac_list_head);
			mutex_unlock(&sw->mac_list_lock);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}
ice_add_mac_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
  1058. /**
  1059. * ice_find_vlan_entry
  1060. * @hw: pointer to the hardware structure
  1061. * @vlan_id: VLAN id to search for
  1062. *
  1063. * Helper function to search for a VLAN entry using a given VLAN id
  1064. * Returns pointer to the entry if found.
  1065. */
  1066. static struct ice_fltr_mgmt_list_entry *
  1067. ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id)
  1068. {
  1069. struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL;
  1070. struct ice_switch_info *sw = hw->switch_info;
  1071. mutex_lock(&sw->vlan_list_lock);
  1072. list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry)
  1073. if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) {
  1074. vlan_ret = vlan_list_itr;
  1075. break;
  1076. }
  1077. mutex_unlock(&sw->vlan_list_lock);
  1078. return vlan_ret;
  1079. }
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * If no rule exists yet for this VLAN id, a new forwarding rule (and, for
 * ICE_FWD_TO_VSI actions, a backing VSI list) is created; otherwise the
 * new VSI is folded into the existing rule via VSI list management.
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	u16 vlan_id;
	new_fltr = &f_entry->fltr_info;
	/* VLAN id should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;
	vlan_id = new_fltr->l_data.vlan.vlan_id;
	v_list_itr = ice_find_vlan_entry(hw, vlan_id);
	if (!v_list_itr) {
		/* No rule tracked for this VLAN yet: create one */
		u16 vsi_id = ICE_VSI_INVAL_ID;
		enum ice_status status;
		u16 vsi_list_id = 0;
		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type;
			/* All VLAN pruning rules use a VSI list.
			 * Convert the action to forwarding to a VSI list.
			 */
			vsi_id = new_fltr->fwd_id.vsi_id;
			status = ice_create_vsi_list_rule(hw, &vsi_id, 1,
							  &vsi_list_id,
							  lkup_type);
			if (status)
				return status;
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}
		/* NOTE(review): if this call fails after a VSI list was
		 * allocated above, the list is not freed here — confirm
		 * cleanup is handled by the caller or at teardown.
		 */
		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status && vsi_id != ICE_VSI_INVAL_ID) {
			/* The rule creation added the bookkeeping entry;
			 * look it up again to attach the VSI list map.
			 */
			v_list_itr = ice_find_vlan_entry(hw, vlan_id);
			if (!v_list_itr)
				return ICE_ERR_DOES_NOT_EXIST;
			v_list_itr->vsi_list_info =
				ice_create_vsi_list_map(hw, &vsi_id, 1,
							vsi_list_id);
		}
		return status;
	}
	/* A rule for this VLAN already exists: merge in the new VSI */
	cur_fltr = &v_list_itr->fltr_info;
	return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr);
}
  1129. /**
  1130. * ice_add_vlan - Add VLAN based filter rule
  1131. * @hw: pointer to the hardware structure
  1132. * @v_list: list of VLAN entries and forwarding information
  1133. */
  1134. enum ice_status
  1135. ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
  1136. {
  1137. struct ice_fltr_list_entry *v_list_itr;
  1138. if (!v_list || !hw)
  1139. return ICE_ERR_PARAM;
  1140. list_for_each_entry(v_list_itr, v_list, list_entry) {
  1141. enum ice_status status;
  1142. if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
  1143. return ICE_ERR_PARAM;
  1144. status = ice_add_vlan_internal(hw, v_list_itr);
  1145. if (status) {
  1146. v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
  1147. return status;
  1148. }
  1149. v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
  1150. }
  1151. return 0;
  1152. }
  1153. /**
  1154. * ice_remove_vsi_list_rule
  1155. * @hw: pointer to the hardware structure
  1156. * @vsi_list_id: VSI list id generated as part of allocate resource
  1157. * @lkup_type: switch rule filter lookup type
  1158. */
  1159. static enum ice_status
  1160. ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
  1161. enum ice_sw_lkup_type lkup_type)
  1162. {
  1163. struct ice_aqc_sw_rules_elem *s_rule;
  1164. enum ice_status status;
  1165. u16 s_rule_size;
  1166. s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
  1167. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  1168. if (!s_rule)
  1169. return ICE_ERR_NO_MEMORY;
  1170. s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
  1171. s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
  1172. /* FW expects number of VSIs in vsi_list resource to be 0 for clear
  1173. * command. Since memory is zero'ed out during initialization, it's not
  1174. * necessary to explicitly initialize the variable to 0.
  1175. */
  1176. status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1,
  1177. ice_aqc_opc_remove_sw_rules, NULL);
  1178. if (!status)
  1179. /* Free the vsi_list resource that we allocated */
  1180. status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
  1181. ice_aqc_opc_free_res);
  1182. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1183. return status;
  1184. }
  1185. /**
  1186. * ice_handle_rem_vsi_list_mgmt
  1187. * @hw: pointer to the hardware structure
  1188. * @vsi_id: ID of the VSI to remove
  1189. * @fm_list_itr: filter management entry for which the VSI list management
  1190. * needs to be done
  1191. */
  1192. static enum ice_status
  1193. ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id,
  1194. struct ice_fltr_mgmt_list_entry *fm_list_itr)
  1195. {
  1196. struct ice_switch_info *sw = hw->switch_info;
  1197. enum ice_status status = 0;
  1198. enum ice_sw_lkup_type lkup_type;
  1199. bool is_last_elem = true;
  1200. bool conv_list = false;
  1201. bool del_list = false;
  1202. u16 vsi_list_id;
  1203. lkup_type = fm_list_itr->fltr_info.lkup_type;
  1204. vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id;
  1205. if (fm_list_itr->vsi_count > 1) {
  1206. status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
  1207. true,
  1208. ice_aqc_opc_update_sw_rules,
  1209. lkup_type);
  1210. if (status)
  1211. return status;
  1212. fm_list_itr->vsi_count--;
  1213. is_last_elem = false;
  1214. clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map);
  1215. }
  1216. /* For non-VLAN rules that forward packets to a VSI list, convert them
  1217. * to forwarding packets to a VSI if there is only one VSI left in the
  1218. * list. Unused lists are then removed.
  1219. * VLAN rules need to use VSI lists even with only one VSI.
  1220. */
  1221. if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) {
  1222. if (lkup_type == ICE_SW_LKUP_VLAN) {
  1223. del_list = is_last_elem;
  1224. } else if (fm_list_itr->vsi_count == 1) {
  1225. conv_list = true;
  1226. del_list = true;
  1227. }
  1228. }
  1229. if (del_list) {
  1230. /* Remove the VSI list since it is no longer used */
  1231. struct ice_vsi_list_map_info *vsi_list_info =
  1232. fm_list_itr->vsi_list_info;
  1233. status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
  1234. if (status)
  1235. return status;
  1236. if (conv_list) {
  1237. u16 rem_vsi_id;
  1238. rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
  1239. ICE_MAX_VSI);
  1240. /* Error out when the expected last element is not in
  1241. * the VSI list map
  1242. */
  1243. if (rem_vsi_id == ICE_MAX_VSI)
  1244. return ICE_ERR_OUT_OF_RANGE;
  1245. /* Change the list entry action from VSI_LIST to VSI */
  1246. fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
  1247. fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id;
  1248. }
  1249. list_del(&vsi_list_info->list_entry);
  1250. devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
  1251. fm_list_itr->vsi_list_info = NULL;
  1252. }
  1253. if (conv_list) {
  1254. /* Convert the rule's forward action to forwarding packets to
  1255. * a VSI
  1256. */
  1257. struct ice_aqc_sw_rules_elem *s_rule;
  1258. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  1259. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE,
  1260. GFP_KERNEL);
  1261. if (!s_rule)
  1262. return ICE_ERR_NO_MEMORY;
  1263. ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
  1264. ice_aqc_opc_update_sw_rules);
  1265. s_rule->pdata.lkup_tx_rx.index =
  1266. cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id);
  1267. status = ice_aq_sw_rules(hw, s_rule,
  1268. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
  1269. ice_aqc_opc_update_sw_rules, NULL);
  1270. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1271. if (status)
  1272. return status;
  1273. }
  1274. if (is_last_elem) {
  1275. /* Remove the lookup rule */
  1276. struct ice_aqc_sw_rules_elem *s_rule;
  1277. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  1278. ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
  1279. GFP_KERNEL);
  1280. if (!s_rule)
  1281. return ICE_ERR_NO_MEMORY;
  1282. ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
  1283. ice_aqc_opc_remove_sw_rules);
  1284. status = ice_aq_sw_rules(hw, s_rule,
  1285. ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
  1286. ice_aqc_opc_remove_sw_rules, NULL);
  1287. if (status)
  1288. return status;
  1289. /* Remove a book keeping entry from the MAC address list */
  1290. mutex_lock(&sw->mac_list_lock);
  1291. list_del(&fm_list_itr->list_entry);
  1292. mutex_unlock(&sw->mac_list_lock);
  1293. devm_kfree(ice_hw_to_dev(hw), fm_list_itr);
  1294. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1295. }
  1296. return status;
  1297. }
  1298. /**
  1299. * ice_remove_mac_entry
  1300. * @hw: pointer to the hardware structure
  1301. * @f_entry: structure containing MAC forwarding information
  1302. */
  1303. static enum ice_status
  1304. ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
  1305. {
  1306. struct ice_fltr_mgmt_list_entry *m_entry;
  1307. u16 vsi_id;
  1308. u8 *add;
  1309. add = &f_entry->fltr_info.l_data.mac.mac_addr[0];
  1310. m_entry = ice_find_mac_entry(hw, add);
  1311. if (!m_entry)
  1312. return ICE_ERR_PARAM;
  1313. vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
  1314. return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry);
  1315. }
  1316. /**
  1317. * ice_remove_mac - remove a MAC address based filter rule
  1318. * @hw: pointer to the hardware structure
  1319. * @m_list: list of MAC addresses and forwarding information
  1320. *
  1321. * This function removes either a MAC filter rule or a specific VSI from a
  1322. * VSI list for a multicast MAC address.
  1323. *
  1324. * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
  1325. * ice_add_mac. Caller should be aware that this call will only work if all
  1326. * the entries passed into m_list were added previously. It will not attempt to
  1327. * do a partial remove of entries that were found.
  1328. */
  1329. enum ice_status
  1330. ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
  1331. {
  1332. struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
  1333. u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
  1334. struct ice_switch_info *sw = hw->switch_info;
  1335. struct ice_fltr_mgmt_list_entry *m_entry;
  1336. struct ice_fltr_list_entry *m_list_itr;
  1337. u16 elem_sent, total_elem_left;
  1338. enum ice_status status = 0;
  1339. u16 num_unicast = 0;
  1340. if (!m_list)
  1341. return ICE_ERR_PARAM;
  1342. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1343. u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
  1344. if (is_unicast_ether_addr(addr) && !hw->ucast_shared)
  1345. num_unicast++;
  1346. else if (is_multicast_ether_addr(addr) ||
  1347. (is_unicast_ether_addr(addr) && hw->ucast_shared))
  1348. ice_remove_mac_entry(hw, m_list_itr);
  1349. }
  1350. /* Exit if no unicast addresses found. Multicast switch rules
  1351. * were added individually
  1352. */
  1353. if (!num_unicast)
  1354. return 0;
  1355. /* Allocate switch rule buffer for the bulk update for unicast */
  1356. s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
  1357. GFP_KERNEL);
  1358. if (!s_rule)
  1359. return ICE_ERR_NO_MEMORY;
  1360. r_iter = s_rule;
  1361. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1362. u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
  1363. if (is_unicast_ether_addr(addr)) {
  1364. m_entry = ice_find_mac_entry(hw, addr);
  1365. if (!m_entry) {
  1366. status = ICE_ERR_DOES_NOT_EXIST;
  1367. goto ice_remove_mac_exit;
  1368. }
  1369. ice_fill_sw_rule(hw, &m_entry->fltr_info,
  1370. r_iter, ice_aqc_opc_remove_sw_rules);
  1371. r_iter = (struct ice_aqc_sw_rules_elem *)
  1372. ((u8 *)r_iter + s_rule_size);
  1373. }
  1374. }
  1375. /* Call AQ bulk switch rule update for all unicast addresses */
  1376. r_iter = s_rule;
  1377. /* Call AQ switch rule in AQ_MAX chunk */
  1378. for (total_elem_left = num_unicast; total_elem_left > 0;
  1379. total_elem_left -= elem_sent) {
  1380. struct ice_aqc_sw_rules_elem *entry = r_iter;
  1381. elem_sent = min(total_elem_left,
  1382. (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
  1383. status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
  1384. elem_sent, ice_aqc_opc_remove_sw_rules,
  1385. NULL);
  1386. if (status)
  1387. break;
  1388. r_iter = (struct ice_aqc_sw_rules_elem *)
  1389. ((u8 *)r_iter + s_rule_size);
  1390. }
  1391. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1392. u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
  1393. if (is_unicast_ether_addr(addr)) {
  1394. m_entry = ice_find_mac_entry(hw, addr);
  1395. if (!m_entry)
  1396. return ICE_ERR_OUT_OF_RANGE;
  1397. mutex_lock(&sw->mac_list_lock);
  1398. list_del(&m_entry->list_entry);
  1399. mutex_unlock(&sw->mac_list_lock);
  1400. devm_kfree(ice_hw_to_dev(hw), m_entry);
  1401. }
  1402. }
  1403. ice_remove_mac_exit:
  1404. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1405. return status;
  1406. }
  1407. /**
  1408. * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default
  1409. * VSI for the switch (represented by swid)
  1410. * @hw: pointer to the hardware structure
  1411. * @vsi_id: number of VSI to set as default
  1412. * @set: true to add the above mentioned switch rule, false to remove it
  1413. * @direction: ICE_FLTR_RX or ICE_FLTR_TX
  1414. */
  1415. enum ice_status
  1416. ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
  1417. {
  1418. struct ice_aqc_sw_rules_elem *s_rule;
  1419. struct ice_fltr_info f_info;
  1420. enum ice_adminq_opc opcode;
  1421. enum ice_status status;
  1422. u16 s_rule_size;
  1423. s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
  1424. ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
  1425. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  1426. if (!s_rule)
  1427. return ICE_ERR_NO_MEMORY;
  1428. memset(&f_info, 0, sizeof(f_info));
  1429. f_info.lkup_type = ICE_SW_LKUP_DFLT;
  1430. f_info.flag = direction;
  1431. f_info.fltr_act = ICE_FWD_TO_VSI;
  1432. f_info.fwd_id.vsi_id = vsi_id;
  1433. if (f_info.flag & ICE_FLTR_RX) {
  1434. f_info.src = hw->port_info->lport;
  1435. if (!set)
  1436. f_info.fltr_rule_id =
  1437. hw->port_info->dflt_rx_vsi_rule_id;
  1438. } else if (f_info.flag & ICE_FLTR_TX) {
  1439. f_info.src = vsi_id;
  1440. if (!set)
  1441. f_info.fltr_rule_id =
  1442. hw->port_info->dflt_tx_vsi_rule_id;
  1443. }
  1444. if (set)
  1445. opcode = ice_aqc_opc_add_sw_rules;
  1446. else
  1447. opcode = ice_aqc_opc_remove_sw_rules;
  1448. ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
  1449. status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
  1450. if (status || !(f_info.flag & ICE_FLTR_TX_RX))
  1451. goto out;
  1452. if (set) {
  1453. u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  1454. if (f_info.flag & ICE_FLTR_TX) {
  1455. hw->port_info->dflt_tx_vsi_num = vsi_id;
  1456. hw->port_info->dflt_tx_vsi_rule_id = index;
  1457. } else if (f_info.flag & ICE_FLTR_RX) {
  1458. hw->port_info->dflt_rx_vsi_num = vsi_id;
  1459. hw->port_info->dflt_rx_vsi_rule_id = index;
  1460. }
  1461. } else {
  1462. if (f_info.flag & ICE_FLTR_TX) {
  1463. hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
  1464. hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
  1465. } else if (f_info.flag & ICE_FLTR_RX) {
  1466. hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
  1467. hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
  1468. }
  1469. }
  1470. out:
  1471. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1472. return status;
  1473. }
  1474. /**
  1475. * ice_remove_vlan_internal - Remove one VLAN based filter rule
  1476. * @hw: pointer to the hardware structure
  1477. * @f_entry: filter entry containing one VLAN information
  1478. */
  1479. static enum ice_status
  1480. ice_remove_vlan_internal(struct ice_hw *hw,
  1481. struct ice_fltr_list_entry *f_entry)
  1482. {
  1483. struct ice_fltr_info *new_fltr;
  1484. struct ice_fltr_mgmt_list_entry *v_list_elem;
  1485. u16 vsi_id;
  1486. new_fltr = &f_entry->fltr_info;
  1487. v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id);
  1488. if (!v_list_elem)
  1489. return ICE_ERR_PARAM;
  1490. vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
  1491. return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem);
  1492. }
  1493. /**
  1494. * ice_remove_vlan - Remove VLAN based filter rule
  1495. * @hw: pointer to the hardware structure
  1496. * @v_list: list of VLAN entries and forwarding information
  1497. */
  1498. enum ice_status
  1499. ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
  1500. {
  1501. struct ice_fltr_list_entry *v_list_itr;
  1502. enum ice_status status = 0;
  1503. if (!v_list || !hw)
  1504. return ICE_ERR_PARAM;
  1505. list_for_each_entry(v_list_itr, v_list, list_entry) {
  1506. status = ice_remove_vlan_internal(hw, v_list_itr);
  1507. if (status) {
  1508. v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
  1509. return status;
  1510. }
  1511. v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
  1512. }
  1513. return status;
  1514. }
  1515. /**
  1516. * ice_add_to_vsi_fltr_list - Add VSI filters to the list
  1517. * @hw: pointer to the hardware structure
  1518. * @vsi_id: ID of VSI to remove filters from
  1519. * @lkup_list_head: pointer to the list that has certain lookup type filters
  1520. * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id
  1521. */
  1522. static enum ice_status
  1523. ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
  1524. struct list_head *lkup_list_head,
  1525. struct list_head *vsi_list_head)
  1526. {
  1527. struct ice_fltr_mgmt_list_entry *fm_entry;
  1528. /* check to make sure VSI id is valid and within boundary */
  1529. if (vsi_id >=
  1530. (sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1))
  1531. return ICE_ERR_PARAM;
  1532. list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
  1533. struct ice_fltr_info *fi;
  1534. fi = &fm_entry->fltr_info;
  1535. if ((fi->fltr_act == ICE_FWD_TO_VSI &&
  1536. fi->fwd_id.vsi_id == vsi_id) ||
  1537. (fi->fltr_act == ICE_FWD_TO_VSI_LIST &&
  1538. (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) {
  1539. struct ice_fltr_list_entry *tmp;
  1540. /* this memory is freed up in the caller function
  1541. * ice_remove_vsi_lkup_fltr() once filters for
  1542. * this VSI are removed
  1543. */
  1544. tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp),
  1545. GFP_KERNEL);
  1546. if (!tmp)
  1547. return ICE_ERR_NO_MEMORY;
  1548. memcpy(&tmp->fltr_info, fi, sizeof(*fi));
  1549. /* Expected below fields to be set to ICE_FWD_TO_VSI and
  1550. * the particular VSI id since we are only removing this
  1551. * one VSI
  1552. */
  1553. if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) {
  1554. tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
  1555. tmp->fltr_info.fwd_id.vsi_id = vsi_id;
  1556. }
  1557. list_add(&tmp->list_entry, vsi_list_head);
  1558. }
  1559. }
  1560. return 0;
  1561. }
  1562. /**
  1563. * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
  1564. * @hw: pointer to the hardware structure
  1565. * @vsi_id: ID of VSI to remove filters from
  1566. * @lkup: switch rule filter lookup type
  1567. */
  1568. static void
  1569. ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
  1570. enum ice_sw_lkup_type lkup)
  1571. {
  1572. struct ice_switch_info *sw = hw->switch_info;
  1573. struct ice_fltr_list_entry *fm_entry;
  1574. struct list_head remove_list_head;
  1575. struct ice_fltr_list_entry *tmp;
  1576. enum ice_status status;
  1577. INIT_LIST_HEAD(&remove_list_head);
  1578. switch (lkup) {
  1579. case ICE_SW_LKUP_MAC:
  1580. mutex_lock(&sw->mac_list_lock);
  1581. status = ice_add_to_vsi_fltr_list(hw, vsi_id,
  1582. &sw->mac_list_head,
  1583. &remove_list_head);
  1584. mutex_unlock(&sw->mac_list_lock);
  1585. if (!status) {
  1586. ice_remove_mac(hw, &remove_list_head);
  1587. goto free_fltr_list;
  1588. }
  1589. break;
  1590. case ICE_SW_LKUP_VLAN:
  1591. mutex_lock(&sw->vlan_list_lock);
  1592. status = ice_add_to_vsi_fltr_list(hw, vsi_id,
  1593. &sw->vlan_list_head,
  1594. &remove_list_head);
  1595. mutex_unlock(&sw->vlan_list_lock);
  1596. if (!status) {
  1597. ice_remove_vlan(hw, &remove_list_head);
  1598. goto free_fltr_list;
  1599. }
  1600. break;
  1601. case ICE_SW_LKUP_MAC_VLAN:
  1602. case ICE_SW_LKUP_ETHERTYPE:
  1603. case ICE_SW_LKUP_ETHERTYPE_MAC:
  1604. case ICE_SW_LKUP_PROMISC:
  1605. case ICE_SW_LKUP_PROMISC_VLAN:
  1606. case ICE_SW_LKUP_DFLT:
  1607. ice_debug(hw, ICE_DBG_SW,
  1608. "Remove filters for this lookup type hasn't been implemented yet\n");
  1609. break;
  1610. }
  1611. return;
  1612. free_fltr_list:
  1613. list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
  1614. list_del(&fm_entry->list_entry);
  1615. devm_kfree(ice_hw_to_dev(hw), fm_entry);
  1616. }
  1617. }
  1618. /**
  1619. * ice_remove_vsi_fltr - Remove all filters for a VSI
  1620. * @hw: pointer to the hardware structure
  1621. * @vsi_id: ID of VSI to remove filters from
  1622. */
  1623. void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id)
  1624. {
  1625. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC);
  1626. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN);
  1627. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC);
  1628. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN);
  1629. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT);
  1630. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE);
  1631. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC);
  1632. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN);
  1633. }