ice_sched.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_sched.h"

/**
 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
 * @pi: port information structure
 * @info: Scheduler element information from firmware
 *
 * This function inserts the root node of the scheduling tree topology
 * to the SW DB.
 */
static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
	if (!root)
		return ICE_ERR_NO_MEMORY;

	/* coverity[suspicious_sizeof] */
	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
				      sizeof(*root), GFP_KERNEL);
	if (!root->children) {
		devm_kfree(ice_hw_to_dev(hw), root);
		return ICE_ERR_NO_MEMORY;
	}

	memcpy(&root->info, info, sizeof(*info));
	pi->root = root;
	return 0;
}

/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node teid to search
 *
 * This function searches for a node matching the teid in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
 *
 * This function needs to be called when holding the port_info->sched_lock
 */
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if teid matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	return NULL;
}
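
/* Illustrative sketch, not part of the driver: a caller would normally walk
 * the tree from the port root while holding pi->sched_lock, as the kernel-doc
 * above requires. The wrapper name below is hypothetical.
 */
#if 0
static struct ice_sched_node *
example_lookup_node(struct ice_port_info *pi, u32 teid)
{
	struct ice_sched_node *node;

	mutex_lock(&pi->sched_lock);
	node = ice_sched_find_node_by_teid(pi->root, teid);
	mutex_unlock(&pi->sched_lock);
	return node;
}
#endif
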
/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the hw struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements (0x0404)
 */
static enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_get_elem *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_cfg_elem *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_update_elem;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems);
	cmd->num_elem_req = cpu_to_le16(elems_req);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_ret)
		*elems_ret = le16_to_cpu(cmd->num_elem_resp);
	return status;
}

/**
 * ice_sched_query_elem - query element information from hw
 * @hw: pointer to the hw struct
 * @node_teid: node teid to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
static enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->generic[0].node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 *
 * This function inserts a scheduler node to the SW DB.
 */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *parent;
	struct ice_aqc_get_elem elem;
	struct ice_sched_node *node;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     le32_to_cpu(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Parent Node not found for parent_teid=0x%x\n",
			  le32_to_cpu(info->parent_teid));
		return ICE_ERR_PARAM;
	}

	/* query the current node information from FW before adding it
	 * to the SW DB
	 */
	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
	if (status)
		return status;

	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return ICE_ERR_NO_MEMORY;
	if (hw->max_children[layer]) {
		/* coverity[suspicious_sizeof] */
		node->children = devm_kcalloc(ice_hw_to_dev(hw),
					      hw->max_children[layer],
					      sizeof(*node), GFP_KERNEL);
		if (!node->children) {
			devm_kfree(ice_hw_to_dev(hw), node);
			return ICE_ERR_NO_MEMORY;
		}
	}

	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	parent->children[parent->num_children++] = node;
	memcpy(&node->info, &elem.generic[0], sizeof(node->info));
	return 0;
}

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the hw struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */
static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16 *grps_del, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_move_delete_elem *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.add_move_delete_elem;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_grps_req = cpu_to_le16(grps_req);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && grps_del)
		*grps_del = le16_to_cpu(cmd->num_grps_updated);
	return status;
}

/**
 * ice_sched_remove_elems - remove nodes from hw
 * @hw: pointer to the hw struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function removes nodes from hw
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	enum ice_status status;
	u16 buf_size;

	buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);

	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
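
/* A worked example of the delete-buffer sizing above (a sketch, assuming the
 * ice_aqc_delete_elem layout declares a single teid element in the struct):
 * for a four-node delete the buffer must hold the header plus teid[0..3] in
 * one contiguous admin queue buffer, which is exactly what the arithmetic in
 * ice_sched_remove_elems() computes.
 */
#if 0
	u16 num_nodes = 4;
	u16 example_size = sizeof(struct ice_aqc_delete_elem) +
			   sizeof(u32) * (num_nodes - 1);
#endif
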
/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @hw: pointer to the hw struct
 * @parent: pointer to the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent,
			 u8 layer)
{
	u8 i;

	if (layer < hw->sw_entry_point_layer)
		return NULL;

	for (i = 0; i < parent->num_children; i++) {
		struct ice_sched_node *node = parent->children[i];

		if (node) {
			if (node->tx_sched_layer == layer)
				return node;
			/* this recursion is intentional, and wouldn't
			 * go more than 9 calls
			 */
			return ice_sched_get_first_node(hw, node, layer);
		}
	}

	return NULL;
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi)
		return NULL;

	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];

	return NULL;
}

/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		enum ice_status status;

		status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
		if (status)
			ice_debug(hw, ICE_DBG_SCHED,
				  "remove element failed %d\n", status);
	}

	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p, *tc_node;

		/* update the parent */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		/* search for previous sibling that points to this node and
		 * remove the reference
		 */
		tc_node = ice_sched_get_tc_node(pi, node->tc_num);
		if (!tc_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Invalid TC number %d\n", node->tc_num);
			goto err_exit;
		}

		p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}
	}

err_exit:
	/* leaf nodes have no children */
	if (node->children)
		devm_kfree(ice_hw_to_dev(hw), node->children);
	devm_kfree(ice_hw_to_dev(hw), node);
}

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the hw struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x400)
 */
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;
	return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling elements
 * @hw: pointer to the hw struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_move_delete_elem *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.add_move_delete_elem;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_grps_req = cpu_to_le16(grps_req);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && grps_added)
		*grps_added = le16_to_cpu(cmd->num_grps_updated);
	return status;
}

/**
 * ice_suspend_resume_elems - suspend/resume scheduler elements
 * @hw: pointer to the hw struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 * @cmd_code: command code for suspend or resume
 *
 * suspend/resume scheduler elements
 */
static enum ice_status
ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_suspend_resume_elem *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd,
			 enum ice_adminq_opc cmd_code)
{
	struct ice_aqc_get_cfg_elem *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_update_elem;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_code);
	cmd->num_elem_req = cpu_to_le16(elems_req);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_ret)
		*elems_ret = le16_to_cpu(cmd->num_elem_resp);
	return status;
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the hw struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
			   struct ice_aqc_suspend_resume_elem *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
					cd, ice_aqc_opc_suspend_sched_elems);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the hw struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * resume scheduling elements (0x040A)
 */
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
			  struct ice_aqc_suspend_resume_elem *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
					cd, ice_aqc_opc_resume_sched_elems);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the hw struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume hw nodes
 * @hw: pointer to the hw struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes hw nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	struct ice_aqc_suspend_resume_elem *buf;
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf) * num_nodes;
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	if (status || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
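
/* Illustrative sketch, not part of the driver: suspending a single node by
 * TEID, mirroring how ice_sched_cfg_vsi() later in this file suspends a VSI
 * node when its TC is disabled. The wrapper name is hypothetical.
 */
#if 0
static enum ice_status example_suspend_node(struct ice_hw *hw,
					    struct ice_sched_node *node)
{
	u32 teid = le32_to_cpu(node->info.node_teid);

	/* true means suspend, false means resume */
	return ice_sched_suspend_resume_elems(hw, 1, &teid, true);
}
#endif
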
/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_vsi_info *vsi_elem;
	struct ice_sched_agg_info *atmp;
	struct ice_sched_vsi_info *tmp;
	struct ice_hw *hw;

	if (!pi)
		return;

	hw = pi->hw;

	list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
		}
	}

	/* remove the vsi list */
	list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,
				 list_entry) {
		list_del(&vsi_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_elem);
	}

	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}

/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
static void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	mutex_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	mutex_unlock(&pi->sched_lock);
	mutex_destroy(&pi->sched_lock);
}

/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the hw struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	if (hw->layer_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
		hw->layer_info = NULL;
	}

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}

/**
 * ice_sched_create_vsi_info_entry - create an empty new VSI entry
 * @pi: port information structure
 * @vsi_id: VSI Id
 *
 * This function creates a new VSI entry and adds it to list
 */
static struct ice_sched_vsi_info *
ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
{
	struct ice_sched_vsi_info *vsi_elem;

	if (!pi)
		return NULL;

	vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem),
				GFP_KERNEL);
	if (!vsi_elem)
		return NULL;

	list_add(&vsi_elem->list_entry, &pi->vsi_info_list);
	vsi_elem->vsi_id = vsi_id;
	return vsi_elem;
}

/**
 * ice_sched_add_elems - add nodes to hw and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the teid of first node
 *
 * This function adds nodes to hw as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 buf_size;
	u32 teid;

	buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
		devm_kfree(ice_hw_to_dev(hw), buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		teid = le32_to_cpu(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(hw, tc_node, layer);
		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		if (i == 0)
			*first_node_teid = teid;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node teid
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes, max_child_nodes;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 num_added = 0;
	u32 temp;

	*num_nodes_added = 0;

	if (!num_nodes)
		return status;

	if (!parent || layer < hw->sw_entry_point_layer)
		return ICE_ERR_PARAM;

	/* max children per node per layer */
	max_child_nodes = hw->max_children[parent->tx_sched_layer];

	/* current number of children + required nodes exceed max children ? */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;

		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
			/* this recursion is intentional, and wouldn't
			 * go more than 2 calls
			 */
			status = ice_sched_add_nodes_to_layer(pi, tc_node,
							      parent, layer,
							      new_num_nodes,
							      first_node_teid,
							      &num_added);
			if (status)
				return status;

			*num_nodes_added += num_added;
		}
		/* Don't modify the first node teid memory if the first node was
		 * added already in the above call. Instead send some temp
		 * memory for all other recursive calls.
		 */
		if (num_added)
			first_teid_ptr = &temp;

		new_num_nodes = num_nodes - num_added;
		/* This parent is full, try the next sibling */
		parent = parent->sibling;
		/* this recursion is intentional, for 1024 queues
		 * per VSI, it goes max of 16 iterations.
		 * 1024 / 8 = 128 layer 8 nodes
		 * 128 / 8 = 16 (add 8 nodes per iteration)
		 */
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      layer, new_num_nodes,
						      first_teid_ptr,
						      &num_added);
		*num_nodes_added += num_added;
		return status;
	}

	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid);
	return status;
}

/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the hw struct
 *
 * This function returns the current queue group layer number
 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* It's always total layers - 1, the array is 0 relative so -2 */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the hw struct
 *
 * This function returns the current VSI layer number
 */
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Num Layers       VSI layer
	 *     9               6
	 *     7               4
	 *     5 or less       sw_entry_point_layer
	 */
	/* calculate the vsi layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}
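
/* A quick worked example of the layer math above (a sketch; it assumes
 * ICE_QGRP_LAYER_OFFSET is 2 and ICE_VSI_LAYER_OFFSET is 3, which is what the
 * comments in the two helpers imply): with a 9-layer tree the queue group
 * layer index is 9 - 2 = 7 and the VSI layer index is 9 - 3 = 6, matching the
 * table in ice_sched_get_vsi_layer().
 */
#if 0
	/* hypothetical 9-layer topology */
	u8 layers = 9;
	u8 qgrp_layer = layers - 2;	/* 7 */
	u8 vsi_layer = layers - 3;	/* 6 */
#endif
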
/**
 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 * @pi: port information structure
 *
 * This function removes the leaf node that was created by the FW
 * during initialization
 */
static void
ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	node = pi->root;
	while (node) {
		if (!node->num_children)
			break;
		node = node->children[0];
	}
	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		enum ice_status status;

		/* remove the default leaf node */
		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
		if (!status)
			ice_free_sched_node(pi, node);
	}
}

/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization
 */
static void
ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	ice_rm_dflt_leaf_node(pi);

	/* remove the default nodes except TC and root nodes */
	node = pi->root;
	while (node) {
		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
			ice_free_sched_node(pi, node);
			break;
		}

		if (!node->num_children)
			break;
		node = node->children[0];
	}
}

/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port information structure
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources and the default topology created by firmware, and to store that
 * information in the SW DB.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = le16_to_cpu(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the Q group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = le16_to_cpu(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	mutex_init(&pi->sched_lock);
	INIT_LIST_HEAD(&pi->agg_list);
	INIT_LIST_HEAD(&pi->vsi_info_list);

err_init_port:
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
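
/* Illustrative sketch, not part of the driver: a plausible bring-up order for
 * these entry points, assuming a hw init path that first discovers the
 * scheduler resources and then builds the per-port tree from the firmware
 * default topology. The wrapper name is hypothetical.
 */
#if 0
static enum ice_status example_sched_bringup(struct ice_hw *hw)
{
	enum ice_status status;

	/* learn num_tx_sched_layers, max_children[], etc. */
	status = ice_sched_query_res_alloc(hw);
	if (status)
		return status;

	/* build the SW DB tree for the port from the FW default topology */
	return ice_sched_init_port(hw->port_info);
}
#endif
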
/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct
 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = 0;
	__le16 max_sibl;
	u8 i;

	if (hw->layer_info)
		return status;

	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		le16_to_cpu(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers; i++) {
		max_sibl = buf->layer_props[i].max_sibl_grp_sz;
		hw->max_children[i] = le16_to_cpu(max_sibl);
	}

	hw->layer_info = (struct ice_aqc_layer_props *)
			 devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
				      (hw->num_tx_sched_layers *
				       sizeof(*hw->layer_info)),
				      GFP_KERNEL);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id
 * @pi: port information structure
 * @vsi_id: vsi id
 *
 * This function retrieves the vsi list for the given vsi id
 */
static struct ice_sched_vsi_info *
ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
{
	struct ice_sched_vsi_info *list_elem;

	if (!pi)
		return NULL;

	list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)
		if (list_elem->vsi_id == vsi_id)
			return list_elem;
	return NULL;
}

/**
 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 * @hw: pointer to the hw struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
 * This function checks whether a given node is part of the base node
 * subtree or not
 */
static bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
			       struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < base->num_children; i++) {
		struct ice_sched_node *child = base->children[i];

		if (node == child)
			return true;

		if (child->tx_sched_layer > node->tx_sched_layer)
			return false;

		/* this recursion is intentional, and wouldn't
		 * go more than 8 calls
		 */
		if (ice_sched_find_node_in_subtree(hw, child, node))
			return true;
	}
	return false;
}

/**
 * ice_sched_get_free_qparent - Get a free lan or rdma q group node
 * @pi: port information structure
 * @vsi_id: vsi id
 * @tc: branch number
 * @owner: lan or rdma
 *
 * This function retrieves a free lan or rdma q group node
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node = NULL;
	struct ice_sched_vsi_info *list_elem;
	u16 max_children;
	u8 qgrp_layer;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
	if (!list_elem)
		goto lan_q_exit;

	vsi_node = list_elem->vsi_node[tc];

	/* validate invalid VSI id */
	if (!vsi_node)
		goto lan_q_exit;

	/* get the first q group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

lan_q_exit:
	return qgrp_node;
}
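
/* Illustrative sketch, not part of the driver: how a LAN queue setup path
 * might pick a parent queue group for a new queue node. The owner value
 * ICE_SCHED_NODE_OWNER_LAN is the one used elsewhere in this file; the lock
 * usage mirrors the other tree accessors. The wrapper name is hypothetical.
 */
#if 0
static struct ice_sched_node *
example_pick_lan_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc)
{
	struct ice_sched_node *qgrp;

	mutex_lock(&pi->sched_lock);
	qgrp = ice_sched_get_free_qparent(pi, vsi_id, tc,
					  ICE_SCHED_NODE_OWNER_LAN);
	mutex_unlock(&pi->sched_lock);
	return qgrp;
}
#endif
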
/**
 * ice_sched_get_vsi_node - Get a VSI node based on VSI id
 * @hw: pointer to the hw struct
 * @tc_node: pointer to the TC node
 * @vsi_id: VSI id
 *
 * This function retrieves a VSI node for a given VSI id from a given
 * TC branch
 */
static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
		       u16 vsi_id)
{
	struct ice_sched_node *node;
	u8 vsi_layer;

	vsi_layer = ice_sched_get_vsi_layer(hw);
	node = ice_sched_get_first_node(hw, tc_node, vsi_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->vsi_id == vsi_id)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the hw struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array
 *
 * This function calculates the number of VSI child nodes based on the
 * number of queues.
 */
static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
{
	u16 num = num_qs;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	/* calculate num nodes from q group to VSI layer */
	for (i = qgl; i > vsil; i--) {
		/* round to the next integer if there is a remainder */
		num = DIV_ROUND_UP(num, hw->max_children[i]);

		/* need at least one node */
		num_nodes[i] = num ? num : 1;
	}
}
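
/* A worked example of the rollup above (a sketch; it assumes every layer
 * below the VSI layer allows 8 children, the same numbers used in the comment
 * in ice_sched_add_nodes_to_layer()): for 1024 queues the queue group layer
 * needs DIV_ROUND_UP(1024, 8) = 128 nodes, the layer above it needs
 * DIV_ROUND_UP(128, 8) = 16 nodes, and so on up until the VSI layer is
 * reached.
 */
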
/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_id: VSI id
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (lan or rdma)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * lan and rdma separately.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
	for (i = vsil + 1; i <= qgl; i++) {
		if (!parent)
			return ICE_ERR_CFG;
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			parent = parent->children[0];
		}
	}

	return 0;
}

/**
 * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
 * @pi: port information structure
 * @vsi_node: pointer to the VSI node
 * @num_nodes: pointer to the num nodes that needs to be removed per layer
 * @owner: node owner (lan or rdma)
 *
 * This function removes the VSI child nodes from the tree. It gets called for
 * lan and rdma separately.
 */
static void
ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
			     struct ice_sched_node *vsi_node, u16 *num_nodes,
			     u8 owner)
{
	struct ice_sched_node *node, *next;
	u8 i, qgl, vsil;
	u16 num;

	qgl = ice_sched_get_qgrp_layer(pi->hw);
	vsil = ice_sched_get_vsi_layer(pi->hw);

	for (i = qgl; i > vsil; i--) {
		num = num_nodes[i];
		node = ice_sched_get_first_node(pi->hw, vsi_node, i);
		while (node && num) {
			next = node->sibling;
			if (node->owner == owner && !node->num_children) {
				ice_free_sched_node(pi, node);
				num--;
			}
			node = next;
		}
	}
}

/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @hw: pointer to the hw struct
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into tx tree including the VSI, parent and intermediate nodes in below
 * layers
 */
static void
ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u8 vsil;
	int i;

	vsil = ice_sched_get_vsi_layer(hw);
	for (i = vsil; i >= hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(hw, tc_node, (u8)i);
			/* scan all the siblings */
			while (node) {
				if (node->num_children < hw->max_children[i])
					break;
				node = node->sibling;
			}

			/* all the nodes are full, allocate a new one */
			if (!node)
				num_nodes[i]++;
		}
}

/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		if (i == vsil)
			parent->vsi_id = vsi_id;
	}
	return 0;
}

/**
 * ice_sched_add_vsi_to_topo - add a new VSI into tree
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc: TC number
 *
 * This function adds a new VSI into scheduler tree
 */
static enum ice_status
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
{
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *tc_node;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	/* calculate number of supported nodes needed for this VSI */
	ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);

	/* add vsi supported nodes to tc subtree */
	return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes);
}

/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues
 */
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
				 u16 new_numqs, u8 owner)
{
	u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_sched_vsi_info *vsi;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;
	u8 i;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
	if (!vsi_node)
		return ICE_ERR_CFG;

	vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
	if (!vsi)
		return ICE_ERR_CFG;

	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		prev_numqs = vsi->max_lanq[tc];
	else
		return ICE_ERR_PARAM;

	/* num queues are not changed */
	if (prev_numqs == new_numqs)
		return status;

	/* calculate number of nodes based on prev/new number of qs */
	if (prev_numqs)
		ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);

	if (prev_numqs > new_numqs) {
		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
			new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];

		ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
					     owner);
	} else {
		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
			new_num_nodes[i] -= prev_num_nodes[i];

		status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node,
						       new_num_nodes, owner);
		if (status)
			return status;
	}

	vsi->max_lanq[tc] = new_numqs;

	return status;
}

/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: lan or rdma
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is not already.
 */
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_sched_vsi_info *vsi;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
	if (!vsi)
		vsi = ice_sched_create_vsi_info_entry(pi, vsi_id);
	if (!vsi)
		return ICE_ERR_NO_MEMORY;

	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);

	/* suspend the VSI if tc is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = le32_to_cpu(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
		if (status)
			return status;

		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
		if (!vsi_node)
			return ICE_ERR_CFG;

		vsi->vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = le32_to_cpu(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}
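
/* Illustrative sketch, not part of the driver: enabling TC 0 for a VSI with a
 * given number of LAN queues. The vsi_id, queue count and the
 * ICE_SCHED_NODE_OWNER_LAN owner follow the description above; the wrapper
 * itself and the assumption that the caller takes pi->sched_lock are
 * hypothetical usage, shown only to tie the pieces together.
 */
#if 0
static enum ice_status
example_cfg_vsi_tc0(struct ice_port_info *pi, u16 vsi_id, u16 num_lan_qs)
{
	enum ice_status status;

	mutex_lock(&pi->sched_lock);
	/* add/update the VSI node and its queue group children on TC 0 */
	status = ice_sched_cfg_vsi(pi, vsi_id, 0, num_lan_qs,
				   ICE_SCHED_NODE_OWNER_LAN, true);
	mutex_unlock(&pi->sched_lock);
	return status;
}
#endif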