ice_sched.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. #include "ice_sched.h"
  4. /**
  5. * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
  6. * @pi: port information structure
  7. * @info: Scheduler element information from firmware
  8. *
  9. * This function inserts the root node of the scheduling tree topology
  10. * to the SW DB.
  11. */
  12. static enum ice_status
  13. ice_sched_add_root_node(struct ice_port_info *pi,
  14. struct ice_aqc_txsched_elem_data *info)
  15. {
  16. struct ice_sched_node *root;
  17. struct ice_hw *hw;
  18. if (!pi)
  19. return ICE_ERR_PARAM;
  20. hw = pi->hw;
  21. root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
  22. if (!root)
  23. return ICE_ERR_NO_MEMORY;
  24. /* coverity[suspicious_sizeof] */
  25. root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
  26. sizeof(*root), GFP_KERNEL);
  27. if (!root->children) {
  28. devm_kfree(ice_hw_to_dev(hw), root);
  29. return ICE_ERR_NO_MEMORY;
  30. }
  31. memcpy(&root->info, info, sizeof(*info));
  32. pi->root = root;
  33. return 0;
  34. }
  35. /**
  36. * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
  37. * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
  38. * @teid: node teid to search
  39. *
  40. * This function searches for a node matching the teid in the scheduling tree
  41. * from the SW DB. The search is recursive and is restricted by the number of
  42. * layers it has searched through; stopping at the max supported layer.
  43. *
  44. * This function needs to be called when holding the port_info->sched_lock
  45. */
  46. struct ice_sched_node *
  47. ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
  48. {
  49. u16 i;
  50. /* The TEID is same as that of the start_node */
  51. if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
  52. return start_node;
  53. /* The node has no children or is at the max layer */
  54. if (!start_node->num_children ||
  55. start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
  56. start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
  57. return NULL;
  58. /* Check if teid matches to any of the children nodes */
  59. for (i = 0; i < start_node->num_children; i++)
  60. if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
  61. return start_node->children[i];
  62. /* Search within each child's sub-tree */
  63. for (i = 0; i < start_node->num_children; i++) {
  64. struct ice_sched_node *tmp;
  65. tmp = ice_sched_find_node_by_teid(start_node->children[i],
  66. teid);
  67. if (tmp)
  68. return tmp;
  69. }
  70. return NULL;
  71. }
  72. /**
  73. * ice_aq_query_sched_elems - query scheduler elements
  74. * @hw: pointer to the hw struct
  75. * @elems_req: number of elements to query
  76. * @buf: pointer to buffer
  77. * @buf_size: buffer size in bytes
  78. * @elems_ret: returns total number of elements returned
  79. * @cd: pointer to command details structure or NULL
  80. *
  81. * Query scheduling elements (0x0404)
  82. */
  83. static enum ice_status
  84. ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
  85. struct ice_aqc_get_elem *buf, u16 buf_size,
  86. u16 *elems_ret, struct ice_sq_cd *cd)
  87. {
  88. struct ice_aqc_get_cfg_elem *cmd;
  89. struct ice_aq_desc desc;
  90. enum ice_status status;
  91. cmd = &desc.params.get_update_elem;
  92. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems);
  93. cmd->num_elem_req = cpu_to_le16(elems_req);
  94. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  95. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  96. if (!status && elems_ret)
  97. *elems_ret = le16_to_cpu(cmd->num_elem_resp);
  98. return status;
  99. }
  100. /**
  101. * ice_sched_query_elem - query element information from hw
  102. * @hw: pointer to the hw struct
  103. * @node_teid: node teid to be queried
  104. * @buf: buffer to element information
  105. *
  106. * This function queries HW element information
  107. */
  108. static enum ice_status
  109. ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
  110. struct ice_aqc_get_elem *buf)
  111. {
  112. u16 buf_size, num_elem_ret = 0;
  113. enum ice_status status;
  114. buf_size = sizeof(*buf);
  115. memset(buf, 0, buf_size);
  116. buf->generic[0].node_teid = cpu_to_le32(node_teid);
  117. status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
  118. NULL);
  119. if (status || num_elem_ret != 1)
  120. ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
  121. return status;
  122. }
  123. /**
  124. * ice_sched_add_node - Insert the Tx scheduler node in SW DB
  125. * @pi: port information structure
  126. * @layer: Scheduler layer of the node
  127. * @info: Scheduler element information from firmware
  128. *
  129. * This function inserts a scheduler node to the SW DB.
  130. */
  131. enum ice_status
  132. ice_sched_add_node(struct ice_port_info *pi, u8 layer,
  133. struct ice_aqc_txsched_elem_data *info)
  134. {
  135. struct ice_sched_node *parent;
  136. struct ice_aqc_get_elem elem;
  137. struct ice_sched_node *node;
  138. enum ice_status status;
  139. struct ice_hw *hw;
  140. if (!pi)
  141. return ICE_ERR_PARAM;
  142. hw = pi->hw;
  143. /* A valid parent node should be there */
  144. parent = ice_sched_find_node_by_teid(pi->root,
  145. le32_to_cpu(info->parent_teid));
  146. if (!parent) {
  147. ice_debug(hw, ICE_DBG_SCHED,
  148. "Parent Node not found for parent_teid=0x%x\n",
  149. le32_to_cpu(info->parent_teid));
  150. return ICE_ERR_PARAM;
  151. }
  152. /* query the current node information from FW before additing it
  153. * to the SW DB
  154. */
  155. status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
  156. if (status)
  157. return status;
  158. node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
  159. if (!node)
  160. return ICE_ERR_NO_MEMORY;
  161. if (hw->max_children[layer]) {
  162. /* coverity[suspicious_sizeof] */
  163. node->children = devm_kcalloc(ice_hw_to_dev(hw),
  164. hw->max_children[layer],
  165. sizeof(*node), GFP_KERNEL);
  166. if (!node->children) {
  167. devm_kfree(ice_hw_to_dev(hw), node);
  168. return ICE_ERR_NO_MEMORY;
  169. }
  170. }
  171. node->in_use = true;
  172. node->parent = parent;
  173. node->tx_sched_layer = layer;
  174. parent->children[parent->num_children++] = node;
  175. memcpy(&node->info, &elem.generic[0], sizeof(node->info));
  176. return 0;
  177. }
  178. /**
  179. * ice_aq_delete_sched_elems - delete scheduler elements
  180. * @hw: pointer to the hw struct
  181. * @grps_req: number of groups to delete
  182. * @buf: pointer to buffer
  183. * @buf_size: buffer size in bytes
  184. * @grps_del: returns total number of elements deleted
  185. * @cd: pointer to command details structure or NULL
  186. *
  187. * Delete scheduling elements (0x040F)
  188. */
  189. static enum ice_status
  190. ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
  191. struct ice_aqc_delete_elem *buf, u16 buf_size,
  192. u16 *grps_del, struct ice_sq_cd *cd)
  193. {
  194. struct ice_aqc_add_move_delete_elem *cmd;
  195. struct ice_aq_desc desc;
  196. enum ice_status status;
  197. cmd = &desc.params.add_move_delete_elem;
  198. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);
  199. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  200. cmd->num_grps_req = cpu_to_le16(grps_req);
  201. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  202. if (!status && grps_del)
  203. *grps_del = le16_to_cpu(cmd->num_grps_updated);
  204. return status;
  205. }
  206. /**
  207. * ice_sched_remove_elems - remove nodes from hw
  208. * @hw: pointer to the hw struct
  209. * @parent: pointer to the parent node
  210. * @num_nodes: number of nodes
  211. * @node_teids: array of node teids to be deleted
  212. *
  213. * This function remove nodes from hw
  214. */
  215. static enum ice_status
  216. ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
  217. u16 num_nodes, u32 *node_teids)
  218. {
  219. struct ice_aqc_delete_elem *buf;
  220. u16 i, num_groups_removed = 0;
  221. enum ice_status status;
  222. u16 buf_size;
  223. buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
  224. buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
  225. if (!buf)
  226. return ICE_ERR_NO_MEMORY;
  227. buf->hdr.parent_teid = parent->info.node_teid;
  228. buf->hdr.num_elems = cpu_to_le16(num_nodes);
  229. for (i = 0; i < num_nodes; i++)
  230. buf->teid[i] = cpu_to_le32(node_teids[i]);
  231. status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
  232. &num_groups_removed, NULL);
  233. if (status || num_groups_removed != 1)
  234. ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
  235. devm_kfree(ice_hw_to_dev(hw), buf);
  236. return status;
  237. }
/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @hw: pointer to the hw struct
 * @parent: pointer the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent,
			 u8 layer)
{
	u8 i;

	/* layers above the SW entry point are not tracked in the SW DB */
	if (layer < hw->sw_entry_point_layer)
		return NULL;
	for (i = 0; i < parent->num_children; i++) {
		struct ice_sched_node *node = parent->children[i];

		if (node) {
			if (node->tx_sched_layer == layer)
				return node;
			/* this recursion is intentional, and wouldn't
			 * go more than 9 calls
			 */
			/* NOTE(review): only the first non-NULL child is
			 * descended — later siblings are never searched.
			 * Presumably every branch reaches @layer at the same
			 * depth so the first child suffices; confirm.
			 */
			return ice_sched_get_first_node(hw, node, layer);
		}
	}
	return NULL;
}
  266. /**
  267. * ice_sched_get_tc_node - get pointer to TC node
  268. * @pi: port information structure
  269. * @tc: TC number
  270. *
  271. * This function returns the TC node pointer
  272. */
  273. struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
  274. {
  275. u8 i;
  276. if (!pi)
  277. return NULL;
  278. for (i = 0; i < pi->root->num_children; i++)
  279. if (pi->root->children[i]->tc_num == tc)
  280. return pi->root->children[i];
  281. return NULL;
  282. }
/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		enum ice_status status;

		/* remove the HW element first; SW bookkeeping continues
		 * even on failure (which is only logged)
		 */
		status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
		if (status)
			ice_debug(hw, ICE_DBG_SCHED,
				  "remove element failed %d\n", status);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p, *tc_node;

		/* update the parent: shift remaining children left to
		 * close the gap left by this node
		 */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}
		/* search for previous sibling that points to this node and
		 * remove the reference
		 */
		tc_node = ice_sched_get_tc_node(pi, node->tc_num);
		if (!tc_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Invalid TC number %d\n", node->tc_num);
			goto err_exit;
		}
		p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}
	}
err_exit:
	/* leaf nodes have no children */
	if (node->children)
		devm_kfree(ice_hw_to_dev(hw), node->children);
	devm_kfree(ice_hw_to_dev(hw), node);
}
  352. /**
  353. * ice_aq_get_dflt_topo - gets default scheduler topology
  354. * @hw: pointer to the hw struct
  355. * @lport: logical port number
  356. * @buf: pointer to buffer
  357. * @buf_size: buffer size in bytes
  358. * @num_branches: returns total number of queue to port branches
  359. * @cd: pointer to command details structure or NULL
  360. *
  361. * Get default scheduler topology (0x400)
  362. */
  363. static enum ice_status
  364. ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
  365. struct ice_aqc_get_topo_elem *buf, u16 buf_size,
  366. u8 *num_branches, struct ice_sq_cd *cd)
  367. {
  368. struct ice_aqc_get_topo *cmd;
  369. struct ice_aq_desc desc;
  370. enum ice_status status;
  371. cmd = &desc.params.get_topo;
  372. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
  373. cmd->port_num = lport;
  374. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  375. if (!status && num_branches)
  376. *num_branches = cmd->num_branches;
  377. return status;
  378. }
  379. /**
  380. * ice_aq_add_sched_elems - adds scheduling element
  381. * @hw: pointer to the hw struct
  382. * @grps_req: the number of groups that are requested to be added
  383. * @buf: pointer to buffer
  384. * @buf_size: buffer size in bytes
  385. * @grps_added: returns total number of groups added
  386. * @cd: pointer to command details structure or NULL
  387. *
  388. * Add scheduling elements (0x0401)
  389. */
  390. static enum ice_status
  391. ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
  392. struct ice_aqc_add_elem *buf, u16 buf_size,
  393. u16 *grps_added, struct ice_sq_cd *cd)
  394. {
  395. struct ice_aqc_add_move_delete_elem *cmd;
  396. struct ice_aq_desc desc;
  397. enum ice_status status;
  398. cmd = &desc.params.add_move_delete_elem;
  399. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems);
  400. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  401. cmd->num_grps_req = cpu_to_le16(grps_req);
  402. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  403. if (!status && grps_added)
  404. *grps_added = le16_to_cpu(cmd->num_grps_updated);
  405. return status;
  406. }
  407. /**
  408. * ice_suspend_resume_elems - suspend/resume scheduler elements
  409. * @hw: pointer to the hw struct
  410. * @elems_req: number of elements to suspend
  411. * @buf: pointer to buffer
  412. * @buf_size: buffer size in bytes
  413. * @elems_ret: returns total number of elements suspended
  414. * @cd: pointer to command details structure or NULL
  415. * @cmd_code: command code for suspend or resume
  416. *
  417. * suspend/resume scheduler elements
  418. */
  419. static enum ice_status
  420. ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req,
  421. struct ice_aqc_suspend_resume_elem *buf, u16 buf_size,
  422. u16 *elems_ret, struct ice_sq_cd *cd,
  423. enum ice_adminq_opc cmd_code)
  424. {
  425. struct ice_aqc_get_cfg_elem *cmd;
  426. struct ice_aq_desc desc;
  427. enum ice_status status;
  428. cmd = &desc.params.get_update_elem;
  429. ice_fill_dflt_direct_cmd_desc(&desc, cmd_code);
  430. cmd->num_elem_req = cpu_to_le16(elems_req);
  431. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  432. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  433. if (!status && elems_ret)
  434. *elems_ret = le16_to_cpu(cmd->num_elem_resp);
  435. return status;
  436. }
  437. /**
  438. * ice_aq_suspend_sched_elems - suspend scheduler elements
  439. * @hw: pointer to the hw struct
  440. * @elems_req: number of elements to suspend
  441. * @buf: pointer to buffer
  442. * @buf_size: buffer size in bytes
  443. * @elems_ret: returns total number of elements suspended
  444. * @cd: pointer to command details structure or NULL
  445. *
  446. * Suspend scheduling elements (0x0409)
  447. */
  448. static enum ice_status
  449. ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
  450. struct ice_aqc_suspend_resume_elem *buf,
  451. u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
  452. {
  453. return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
  454. cd, ice_aqc_opc_suspend_sched_elems);
  455. }
  456. /**
  457. * ice_aq_resume_sched_elems - resume scheduler elements
  458. * @hw: pointer to the hw struct
  459. * @elems_req: number of elements to resume
  460. * @buf: pointer to buffer
  461. * @buf_size: buffer size in bytes
  462. * @elems_ret: returns total number of elements resumed
  463. * @cd: pointer to command details structure or NULL
  464. *
  465. * resume scheduling elements (0x040A)
  466. */
  467. static enum ice_status
  468. ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
  469. struct ice_aqc_suspend_resume_elem *buf,
  470. u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
  471. {
  472. return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
  473. cd, ice_aqc_opc_resume_sched_elems);
  474. }
  475. /**
  476. * ice_aq_query_sched_res - query scheduler resource
  477. * @hw: pointer to the hw struct
  478. * @buf_size: buffer size in bytes
  479. * @buf: pointer to buffer
  480. * @cd: pointer to command details structure or NULL
  481. *
  482. * Query scheduler resource allocation (0x0412)
  483. */
  484. static enum ice_status
  485. ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
  486. struct ice_aqc_query_txsched_res_resp *buf,
  487. struct ice_sq_cd *cd)
  488. {
  489. struct ice_aq_desc desc;
  490. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
  491. return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  492. }
/**
 * ice_sched_suspend_resume_elems - suspend or resume hw nodes
 * @hw: pointer to the hw struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes hw nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	struct ice_aqc_suspend_resume_elem *buf;
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf) * num_nodes;
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* NOTE(review): teids are written through buf->teid[i], i.e. as a
	 * flat le32 run starting at the first element — presumably the AQ
	 * buffer is laid out as a packed teid list; confirm against the
	 * ice_aqc_suspend_resume_elem definition.
	 */
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	/* FW must act on exactly the number of nodes requested */
	if (status || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;
	struct ice_hw *hw;

	if (!pi)
		return;
	hw = pi->hw;
	/* free each aggregator's VSI entries; the agg_info entries
	 * themselves stay on pi->agg_list — presumably freed by the list's
	 * owner elsewhere, confirm
	 */
	list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
		}
	}
	/* freeing the root tears down the whole tree recursively */
	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}
/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
static void ice_sched_clear_port(struct ice_port_info *pi)
{
	/* only a port that completed scheduler init has anything to clear */
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	/* drop out of READY first so other paths stop using the tree,
	 * then tear it down under the lock and destroy the lock last
	 */
	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	mutex_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	mutex_unlock(&pi->sched_lock);
	mutex_destroy(&pi->sched_lock);
}
  572. /**
  573. * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
  574. * @hw: pointer to the hw struct
  575. *
  576. * Cleanup scheduling elements from SW DB for all the ports
  577. */
  578. void ice_sched_cleanup_all(struct ice_hw *hw)
  579. {
  580. if (!hw)
  581. return;
  582. if (hw->layer_info) {
  583. devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
  584. hw->layer_info = NULL;
  585. }
  586. if (hw->port_info)
  587. ice_sched_clear_port(hw->port_info);
  588. hw->num_tx_sched_layers = 0;
  589. hw->num_tx_sched_phys_layers = 0;
  590. hw->flattened_layers = 0;
  591. hw->max_cgds = 0;
  592. }
/**
 * ice_sched_add_elems - add nodes to hw and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the teid of first node
 *
 * This function add nodes to hw as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 buf_size;
	u32 teid;

	/* one element lives inside *buf; append room for the rest */
	buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;
	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	/* request generic elements with the default CIR/EIR BW profile */
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	/* all nodes go in one group; anything else is a config failure */
	if (status || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
		devm_kfree(ice_hw_to_dev(hw), buf);
		return ICE_ERR_CFG;
	}
	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}
		/* the FW-assigned TEID locates the node we just inserted */
		teid = le32_to_cpu(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Node is missing for teid =%d\n", teid);
			break;
		}
		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;
		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(hw, tc_node, layer);
		if (prev && prev != new_node) {
			/* append at the tail of the sibling chain */
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}
		if (i == 0)
			*first_node_teid = teid;
	}
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node teid
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function add nodes to a given layer.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes, max_child_nodes;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 num_added = 0;
	u32 temp;

	*num_nodes_added = 0;

	if (!num_nodes)
		return status;

	if (!parent || layer < hw->sw_entry_point_layer)
		return ICE_ERR_PARAM;

	/* max children per node per layer */
	max_child_nodes = hw->max_children[parent->tx_sched_layer];

	/* current number of children + required nodes exceed max children ? */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;

		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
			/* this recursion is intentional, and wouldn't
			 * go more than 2 calls
			 */
			status = ice_sched_add_nodes_to_layer(pi, tc_node,
							      parent, layer,
							      new_num_nodes,
							      first_node_teid,
							      &num_added);
			if (status)
				return status;

			*num_nodes_added += num_added;
		}
		/* Don't modify the first node teid memory if the first node was
		 * added already in the above call. Instead send some temp
		 * memory for all other recursive calls.
		 */
		if (num_added)
			first_teid_ptr = &temp;

		new_num_nodes = num_nodes - num_added;

		/* This parent is full, try the next sibling */
		parent = parent->sibling;
		/* this recursion is intentional, for 1024 queues
		 * per VSI, it goes max of 16 iterations.
		 * 1024 / 8 = 128 layer 8 nodes
		 * 128 /8 = 16 (add 8 nodes per iteration)
		 */
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      layer, new_num_nodes,
						      first_teid_ptr,
						      &num_added);
		*num_nodes_added += num_added;
		return status;
	}

	/* the simple case: everything fits under this parent */
	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid);
	return status;
}
  756. /**
  757. * ice_sched_get_qgrp_layer - get the current queue group layer number
  758. * @hw: pointer to the hw struct
  759. *
  760. * This function returns the current queue group layer number
  761. */
  762. static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
  763. {
  764. /* It's always total layers - 1, the array is 0 relative so -2 */
  765. return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
  766. }
  767. /**
  768. * ice_sched_get_vsi_layer - get the current VSI layer number
  769. * @hw: pointer to the hw struct
  770. *
  771. * This function returns the current VSI layer number
  772. */
  773. static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
  774. {
  775. /* Num Layers VSI layer
  776. * 9 6
  777. * 7 4
  778. * 5 or less sw_entry_point_layer
  779. */
  780. /* calculate the vsi layer based on number of layers. */
  781. if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
  782. u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
  783. if (layer > hw->sw_entry_point_layer)
  784. return layer;
  785. }
  786. return hw->sw_entry_point_layer;
  787. }
  788. /**
  789. * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
  790. * @pi: port information structure
  791. *
  792. * This function removes the leaf node that was created by the FW
  793. * during initialization
  794. */
  795. static void
  796. ice_rm_dflt_leaf_node(struct ice_port_info *pi)
  797. {
  798. struct ice_sched_node *node;
  799. node = pi->root;
  800. while (node) {
  801. if (!node->num_children)
  802. break;
  803. node = node->children[0];
  804. }
  805. if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
  806. u32 teid = le32_to_cpu(node->info.node_teid);
  807. enum ice_status status;
  808. /* remove the default leaf node */
  809. status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
  810. if (!status)
  811. ice_free_sched_node(pi, node);
  812. }
  813. }
  814. /**
  815. * ice_sched_rm_dflt_nodes - free the default nodes in the tree
  816. * @pi: port information structure
  817. *
  818. * This function frees all the nodes except root and TC that were created by
  819. * the FW during initialization
  820. */
  821. static void
  822. ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
  823. {
  824. struct ice_sched_node *node;
  825. ice_rm_dflt_leaf_node(pi);
  826. /* remove the default nodes except TC and root nodes */
  827. node = pi->root;
  828. while (node) {
  829. if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
  830. node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
  831. node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
  832. ice_free_sched_node(pi, node);
  833. break;
  834. }
  835. if (!node->num_children)
  836. break;
  837. node = node->children[0];
  838. }
  839. }
/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB. On any failure after the topology query, the partially built
 * tree rooted at pi->root is freed before returning.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = le16_to_cpu(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the Q group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = le16_to_cpu(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point.
			 * NOTE(review): buf[0] (branch 0) is consulted for
			 * the entry-point element type even while iterating
			 * branch i -- presumably the layer types align
			 * across branches; confirm against FW topology docs.
			 */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	mutex_init(&pi->sched_lock);
	INIT_LIST_HEAD(&pi->agg_list);

err_init_port:
	/* tear down any partially built tree on failure; buf always freed */
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
  927. /**
  928. * ice_sched_query_res_alloc - query the FW for num of logical sched layers
  929. * @hw: pointer to the HW struct
  930. *
  931. * query FW for allocated scheduler resources and store in HW struct
  932. */
  933. enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
  934. {
  935. struct ice_aqc_query_txsched_res_resp *buf;
  936. enum ice_status status = 0;
  937. __le16 max_sibl;
  938. u8 i;
  939. if (hw->layer_info)
  940. return status;
  941. buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
  942. if (!buf)
  943. return ICE_ERR_NO_MEMORY;
  944. status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
  945. if (status)
  946. goto sched_query_out;
  947. hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
  948. hw->num_tx_sched_phys_layers =
  949. le16_to_cpu(buf->sched_props.phys_levels);
  950. hw->flattened_layers = buf->sched_props.flattening_bitmap;
  951. hw->max_cgds = buf->sched_props.max_pf_cgds;
  952. /* max sibling group size of current layer refers to the max children
  953. * of the below layer node.
  954. * layer 1 node max children will be layer 2 max sibling group size
  955. * layer 2 node max children will be layer 3 max sibling group size
  956. * and so on. This array will be populated from root (index 0) to
  957. * qgroup layer 7. Leaf node has no children.
  958. */
  959. for (i = 0; i < hw->num_tx_sched_layers; i++) {
  960. max_sibl = buf->layer_props[i].max_sibl_grp_sz;
  961. hw->max_children[i] = le16_to_cpu(max_sibl);
  962. }
  963. hw->layer_info = (struct ice_aqc_layer_props *)
  964. devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
  965. (hw->num_tx_sched_layers *
  966. sizeof(*hw->layer_info)),
  967. GFP_KERNEL);
  968. if (!hw->layer_info) {
  969. status = ICE_ERR_NO_MEMORY;
  970. goto sched_query_out;
  971. }
  972. sched_query_out:
  973. devm_kfree(ice_hw_to_dev(hw), buf);
  974. return status;
  975. }
  976. /**
  977. * ice_sched_find_node_in_subtree - Find node in part of base node subtree
  978. * @hw: pointer to the hw struct
  979. * @base: pointer to the base node
  980. * @node: pointer to the node to search
  981. *
  982. * This function checks whether a given node is part of the base node
  983. * subtree or not
  984. */
  985. static bool
  986. ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
  987. struct ice_sched_node *node)
  988. {
  989. u8 i;
  990. for (i = 0; i < base->num_children; i++) {
  991. struct ice_sched_node *child = base->children[i];
  992. if (node == child)
  993. return true;
  994. if (child->tx_sched_layer > node->tx_sched_layer)
  995. return false;
  996. /* this recursion is intentional, and wouldn't
  997. * go more than 8 calls
  998. */
  999. if (ice_sched_find_node_in_subtree(hw, child, node))
  1000. return true;
  1001. }
  1002. return false;
  1003. }
  1004. /**
  1005. * ice_sched_get_free_qparent - Get a free lan or rdma q group node
  1006. * @pi: port information structure
  1007. * @vsi_handle: software VSI handle
  1008. * @tc: branch number
  1009. * @owner: lan or rdma
  1010. *
  1011. * This function retrieves a free lan or rdma q group node
  1012. */
  1013. struct ice_sched_node *
  1014. ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
  1015. u8 owner)
  1016. {
  1017. struct ice_sched_node *vsi_node, *qgrp_node = NULL;
  1018. struct ice_vsi_ctx *vsi_ctx;
  1019. u16 max_children;
  1020. u8 qgrp_layer;
  1021. qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
  1022. max_children = pi->hw->max_children[qgrp_layer];
  1023. vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
  1024. if (!vsi_ctx)
  1025. return NULL;
  1026. vsi_node = vsi_ctx->sched.vsi_node[tc];
  1027. /* validate invalid VSI id */
  1028. if (!vsi_node)
  1029. goto lan_q_exit;
  1030. /* get the first q group node from VSI sub-tree */
  1031. qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
  1032. while (qgrp_node) {
  1033. /* make sure the qgroup node is part of the VSI subtree */
  1034. if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
  1035. if (qgrp_node->num_children < max_children &&
  1036. qgrp_node->owner == owner)
  1037. break;
  1038. qgrp_node = qgrp_node->sibling;
  1039. }
  1040. lan_q_exit:
  1041. return qgrp_node;
  1042. }
  1043. /**
  1044. * ice_sched_get_vsi_node - Get a VSI node based on VSI id
  1045. * @hw: pointer to the hw struct
  1046. * @tc_node: pointer to the TC node
  1047. * @vsi_handle: software VSI handle
  1048. *
  1049. * This function retrieves a VSI node for a given VSI id from a given
  1050. * TC branch
  1051. */
  1052. static struct ice_sched_node *
  1053. ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
  1054. u16 vsi_handle)
  1055. {
  1056. struct ice_sched_node *node;
  1057. u8 vsi_layer;
  1058. vsi_layer = ice_sched_get_vsi_layer(hw);
  1059. node = ice_sched_get_first_node(hw, tc_node, vsi_layer);
  1060. /* Check whether it already exists */
  1061. while (node) {
  1062. if (node->vsi_handle == vsi_handle)
  1063. return node;
  1064. node = node->sibling;
  1065. }
  1066. return node;
  1067. }
  1068. /**
  1069. * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
  1070. * @hw: pointer to the hw struct
  1071. * @num_qs: number of queues
  1072. * @num_nodes: num nodes array
  1073. *
  1074. * This function calculates the number of VSI child nodes based on the
  1075. * number of queues.
  1076. */
  1077. static void
  1078. ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
  1079. {
  1080. u16 num = num_qs;
  1081. u8 i, qgl, vsil;
  1082. qgl = ice_sched_get_qgrp_layer(hw);
  1083. vsil = ice_sched_get_vsi_layer(hw);
  1084. /* calculate num nodes from q group to VSI layer */
  1085. for (i = qgl; i > vsil; i--) {
  1086. /* round to the next integer if there is a remainder */
  1087. num = DIV_ROUND_UP(num, hw->max_children[i]);
  1088. /* need at least one node */
  1089. num_nodes[i] = num ? num : 1;
  1090. }
  1091. }
/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (lan or rdma)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * lan and rdma separately. Nodes are added layer by layer from just below
 * the VSI layer down to the queue group layer; each layer's first new node
 * becomes the parent for the next layer.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	/* start under the existing VSI node of this TC branch */
	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
	for (i = vsil + 1; i <= qgl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		/* partial adds are treated as a configuration failure */
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* tag the new node and its siblings with the
			 * requesting owner (lan or rdma)
			 */
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			/* nothing added on this layer; descend via the
			 * existing first child
			 */
			parent = parent->children[0];
		}
	}

	return 0;
}
  1143. /**
  1144. * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
  1145. * @pi: port information structure
  1146. * @vsi_node: pointer to the VSI node
  1147. * @num_nodes: pointer to the num nodes that needs to be removed per layer
  1148. * @owner: node owner (lan or rdma)
  1149. *
  1150. * This function removes the VSI child nodes from the tree. It gets called for
  1151. * lan and rdma separately.
  1152. */
  1153. static void
  1154. ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
  1155. struct ice_sched_node *vsi_node, u16 *num_nodes,
  1156. u8 owner)
  1157. {
  1158. struct ice_sched_node *node, *next;
  1159. u8 i, qgl, vsil;
  1160. u16 num;
  1161. qgl = ice_sched_get_qgrp_layer(pi->hw);
  1162. vsil = ice_sched_get_vsi_layer(pi->hw);
  1163. for (i = qgl; i > vsil; i--) {
  1164. num = num_nodes[i];
  1165. node = ice_sched_get_first_node(pi->hw, vsi_node, i);
  1166. while (node && num) {
  1167. next = node->sibling;
  1168. if (node->owner == owner && !node->num_children) {
  1169. ice_free_sched_node(pi, node);
  1170. num--;
  1171. }
  1172. node = next;
  1173. }
  1174. }
  1175. }
  1176. /**
  1177. * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
  1178. * @hw: pointer to the hw struct
  1179. * @tc_node: pointer to TC node
  1180. * @num_nodes: pointer to num nodes array
  1181. *
  1182. * This function calculates the number of supported nodes needed to add this
  1183. * VSI into tx tree including the VSI, parent and intermediate nodes in below
  1184. * layers
  1185. */
  1186. static void
  1187. ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
  1188. struct ice_sched_node *tc_node, u16 *num_nodes)
  1189. {
  1190. struct ice_sched_node *node;
  1191. u8 vsil;
  1192. int i;
  1193. vsil = ice_sched_get_vsi_layer(hw);
  1194. for (i = vsil; i >= hw->sw_entry_point_layer; i--)
  1195. /* Add intermediate nodes if TC has no children and
  1196. * need at least one node for VSI
  1197. */
  1198. if (!tc_node->num_children || i == vsil) {
  1199. num_nodes[i]++;
  1200. } else {
  1201. /* If intermediate nodes are reached max children
  1202. * then add a new one.
  1203. */
  1204. node = ice_sched_get_first_node(hw, tc_node, (u8)i);
  1205. /* scan all the siblings */
  1206. while (node) {
  1207. if (node->num_children < hw->max_children[i])
  1208. break;
  1209. node = node->sibling;
  1210. }
  1211. /* all the nodes are full, allocate a new one */
  1212. if (!node)
  1213. num_nodes[i]++;
  1214. }
  1215. }
/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into tx tree including the
 * VSI, its parent and intermediate nodes in below layers. The chain is
 * built top-down from the SW entry point layer to the VSI layer; the node
 * created at the VSI layer receives @vsi_handle.
 */
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		/* partial adds are treated as a configuration failure */
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			/* no new node needed here; descend via the existing
			 * first child
			 */
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		/* only the bottom (VSI layer) node carries the VSI handle */
		if (i == vsil)
			parent->vsi_handle = vsi_handle;
	}

	return 0;
}
  1260. /**
  1261. * ice_sched_add_vsi_to_topo - add a new VSI into tree
  1262. * @pi: port information structure
  1263. * @vsi_handle: software VSI handle
  1264. * @tc: TC number
  1265. *
  1266. * This function adds a new VSI into scheduler tree
  1267. */
  1268. static enum ice_status
  1269. ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
  1270. {
  1271. u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
  1272. struct ice_sched_node *tc_node;
  1273. struct ice_hw *hw = pi->hw;
  1274. tc_node = ice_sched_get_tc_node(pi, tc);
  1275. if (!tc_node)
  1276. return ICE_ERR_PARAM;
  1277. /* calculate number of supported nodes needed for this VSI */
  1278. ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
  1279. /* add vsi supported nodes to tc subtree */
  1280. return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
  1281. num_nodes);
  1282. }
/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues.
 * It computes the per-layer node counts for both the previous and the new
 * queue count and then removes the surplus or adds the shortfall.
 */
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
				 u8 tc, u16 new_numqs, u8 owner)
{
	u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;
	u8 i;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_CFG;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	/* only the LAN owner is supported here; others are rejected */
	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		prev_numqs = vsi_ctx->sched.max_lanq[tc];
	else
		return ICE_ERR_PARAM;

	/* num queues are not changed */
	if (prev_numqs == new_numqs)
		return status;

	/* calculate number of nodes based on prev/new number of qs */
	if (prev_numqs)
		ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);

	if (prev_numqs > new_numqs) {
		/* shrinking: convert new_num_nodes into the per-layer
		 * surplus and remove that many nodes
		 */
		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
			new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];

		ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
					     owner);
	} else {
		/* growing: convert new_num_nodes into the per-layer
		 * shortfall and add only that many nodes
		 */
		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
			new_num_nodes[i] -= prev_num_nodes[i];

		status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
						       new_num_nodes, owner);
		if (status)
			return status;
	}

	/* remember the new queue count for the next update */
	vsi_ctx->sched.max_lanq[tc] = new_numqs;

	return status;
}
/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: lan or rdma
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is not already.
 */
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);

	/* suspend the VSI if tc is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = le32_to_cpu(vsi_node->info.node_teid);

			/* suspend (true) the VSI element in FW */
			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
		if (status)
			return status;

		/* re-lookup: the add above created the VSI node */
		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
		if (!vsi_node)
			return ICE_ERR_CFG;

		vsi_ctx->sched.vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
		/* invalidate the max queues whenever VSI gets added first time
		 * into the scheduler tree (boot or after reset). We need to
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
						  owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = le32_to_cpu(vsi_node->info.node_teid);

		/* resume (false) the VSI element in FW */
		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}