ice_main.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. /* Intel(R) Ethernet Connection E800 Series Linux Driver */
  4. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  5. #include "ice.h"
  6. #define DRV_VERSION "ice-0.0.1-k"
  7. #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
  8. static const char ice_drv_ver[] = DRV_VERSION;
  9. static const char ice_driver_string[] = DRV_SUMMARY;
  10. static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
  11. MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  12. MODULE_DESCRIPTION(DRV_SUMMARY);
  13. MODULE_LICENSE("GPL");
  14. MODULE_VERSION(DRV_VERSION);
  15. static int debug = -1;
  16. module_param(debug, int, 0644);
  17. #ifndef CONFIG_DYNAMIC_DEBUG
  18. MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
  19. #else
  20. MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
  21. #endif /* !CONFIG_DYNAMIC_DEBUG */
  22. static struct workqueue_struct *ice_wq;
  23. static int ice_vsi_release(struct ice_vsi *vsi);
  24. /**
  25. * ice_get_free_slot - get the next free (NULL) slot index in an array
  26. * @array: array to search
  27. * @size: size of the array
  28. * @curr: last known occupied index to be used as a search hint
  29. *
  30. * void * is being used to keep the functionality generic. This lets us use this
  31. * function on any array of pointers.
  32. */
  33. static int ice_get_free_slot(void *array, int size, int curr)
  34. {
  35. int **tmp_array = (int **)array;
  36. int next;
  37. if (curr < (size - 1) && !tmp_array[curr + 1]) {
  38. next = curr + 1;
  39. } else {
  40. int i = 0;
  41. while ((i < size) && (tmp_array[i]))
  42. i++;
  43. if (i == size)
  44. next = ICE_NO_VSI;
  45. else
  46. next = i;
  47. }
  48. return next;
  49. }
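/* Example (illustrative only, not driver code): with a hypothetical pf->vsi
 * array of size 4 holding { vsi0, vsi1, NULL, NULL } and curr = 1, slot 2 is
 * NULL, so ice_get_free_slot() returns 2. Once every slot is occupied, the
 * linear scan runs off the end and ICE_NO_VSI is returned instead.
 */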
  50. /**
  51. * ice_search_res - Search the tracker for a block of resources
  52. * @res: pointer to the resource
  53. * @needed: size of the block needed
  54. * @id: identifier to track owner
  55. * Returns the base item index of the block, or -ENOMEM for error
  56. */
  57. static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
  58. {
  59. int start = res->search_hint;
  60. int end = start;
  61. id |= ICE_RES_VALID_BIT;
  62. do {
  63. /* skip already allocated entries */
  64. if (res->list[end++] & ICE_RES_VALID_BIT) {
  65. start = end;
  66. if ((start + needed) > res->num_entries)
  67. break;
  68. }
  69. if (end == (start + needed)) {
  70. int i = start;
  71. /* there was enough, so assign it to the requestor */
  72. while (i != end)
  73. res->list[i++] = id;
  74. if (end == res->num_entries)
  75. end = 0;
  76. res->search_hint = end;
  77. return start;
  78. }
  79. } while (1);
  80. return -ENOMEM;
  81. }
  82. /**
  83. * ice_get_res - get a block of resources
  84. * @pf: board private structure
  85. * @res: pointer to the resource
  86. * @needed: size of the block needed
  87. * @id: identifier to track owner
  88. *
  89. * Returns the base item index of the block, or -ENOMEM for error
  90. * The search_hint trick and the lack of advanced fit-finding only work
  91. * because requests are very likely to all be the same size.
  92. * Linear search time and any fragmentation should be minimal.
  93. */
  94. static int
  95. ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
  96. {
  97. int ret;
  98. if (!res || !pf)
  99. return -EINVAL;
  100. if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
  101. dev_err(&pf->pdev->dev,
  102. "param err: needed=%d, num_entries = %d id=0x%04x\n",
  103. needed, res->num_entries, id);
  104. return -EINVAL;
  105. }
  106. /* search based on search_hint */
  107. ret = ice_search_res(res, needed, id);
  108. if (ret < 0) {
  109. /* previous search failed. Reset search hint and try again */
  110. res->search_hint = 0;
  111. ret = ice_search_res(res, needed, id);
  112. }
  113. return ret;
  114. }
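/* Example (illustrative only): with res->num_entries = 8, search_hint = 0 and
 * an empty tracker, ice_get_res(pf, res, 2, id) marks entries 0 and 1 with
 * (id | ICE_RES_VALID_BIT), advances search_hint to 2 and returns base index 0.
 * A later ice_free_res(res, 0, id) clears both entries and returns 2.
 */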
  115. /**
  116. * ice_free_res - free a block of resources
  117. * @res: pointer to the resource
  118. * @index: starting index previously returned by ice_get_res
  119. * @id: identifier to track owner
  120. * Returns number of resources freed
  121. */
  122. static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
  123. {
  124. int count = 0;
  125. int i;
  126. if (!res || index >= res->num_entries)
  127. return -EINVAL;
  128. id |= ICE_RES_VALID_BIT;
  129. for (i = index; i < res->num_entries && res->list[i] == id; i++) {
  130. res->list[i] = 0;
  131. count++;
  132. }
  133. return count;
  134. }
  135. /**
  136. * ice_add_mac_to_list - Add a MAC address filter entry to the list
  137. * @vsi: the VSI to be forwarded to
  138. * @add_list: pointer to the list which contains MAC filter entries
  139. * @macaddr: the MAC address to be added.
  140. *
  141. * Adds a MAC address filter entry to the temporary list
  142. *
  143. * Returns 0 on success or -ENOMEM on failure.
  144. */
  145. static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
  146. const u8 *macaddr)
  147. {
  148. struct ice_fltr_list_entry *tmp;
  149. struct ice_pf *pf = vsi->back;
  150. tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
  151. if (!tmp)
  152. return -ENOMEM;
  153. tmp->fltr_info.flag = ICE_FLTR_TX;
  154. tmp->fltr_info.src = vsi->vsi_num;
  155. tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
  156. tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
  157. tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
  158. ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
  159. INIT_LIST_HEAD(&tmp->list_entry);
  160. list_add(&tmp->list_entry, add_list);
  161. return 0;
  162. }
  163. /**
  164. * ice_free_fltr_list - free filter lists helper
  165. * @dev: pointer to the device struct
  166. * @h: pointer to the list head to be freed
  167. *
  168. * Helper function to free filter lists previously created using
  169. * ice_add_mac_to_list
  170. */
  171. static void ice_free_fltr_list(struct device *dev, struct list_head *h)
  172. {
  173. struct ice_fltr_list_entry *e, *tmp;
  174. list_for_each_entry_safe(e, tmp, h, list_entry) {
  175. list_del(&e->list_entry);
  176. devm_kfree(dev, e);
  177. }
  178. }
  179. /**
  180. * __ice_clean_ctrlq - helper function to clean controlq rings
  181. * @pf: ptr to struct ice_pf
  182. * @q_type: specific Control queue type
  183. */
  184. static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
  185. {
  186. struct ice_rq_event_info event;
  187. struct ice_hw *hw = &pf->hw;
  188. struct ice_ctl_q_info *cq;
  189. u16 pending, i = 0;
  190. const char *qtype;
  191. u32 oldval, val;
  192. switch (q_type) {
  193. case ICE_CTL_Q_ADMIN:
  194. cq = &hw->adminq;
  195. qtype = "Admin";
  196. break;
  197. default:
  198. dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
  199. q_type);
  200. return 0;
  201. }
  202. /* check for error indications - the PF_xx_AxQLEN register layouts for
  203. * FW/MBX/SB are identical, so just use the PF_FW_AxQLEN defines.
  204. */
  205. val = rd32(hw, cq->rq.len);
  206. if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
  207. PF_FW_ARQLEN_ARQCRIT_M)) {
  208. oldval = val;
  209. if (val & PF_FW_ARQLEN_ARQVFE_M)
  210. dev_dbg(&pf->pdev->dev,
  211. "%s Receive Queue VF Error detected\n", qtype);
  212. if (val & PF_FW_ARQLEN_ARQOVFL_M) {
  213. dev_dbg(&pf->pdev->dev,
  214. "%s Receive Queue Overflow Error detected\n",
  215. qtype);
  216. }
  217. if (val & PF_FW_ARQLEN_ARQCRIT_M)
  218. dev_dbg(&pf->pdev->dev,
  219. "%s Receive Queue Critical Error detected\n",
  220. qtype);
  221. val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
  222. PF_FW_ARQLEN_ARQCRIT_M);
  223. if (oldval != val)
  224. wr32(hw, cq->rq.len, val);
  225. }
  226. val = rd32(hw, cq->sq.len);
  227. if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
  228. PF_FW_ATQLEN_ATQCRIT_M)) {
  229. oldval = val;
  230. if (val & PF_FW_ATQLEN_ATQVFE_M)
  231. dev_dbg(&pf->pdev->dev,
  232. "%s Send Queue VF Error detected\n", qtype);
  233. if (val & PF_FW_ATQLEN_ATQOVFL_M) {
  234. dev_dbg(&pf->pdev->dev,
  235. "%s Send Queue Overflow Error detected\n",
  236. qtype);
  237. }
  238. if (val & PF_FW_ATQLEN_ATQCRIT_M)
  239. dev_dbg(&pf->pdev->dev,
  240. "%s Send Queue Critical Error detected\n",
  241. qtype);
  242. val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
  243. PF_FW_ATQLEN_ATQCRIT_M);
  244. if (oldval != val)
  245. wr32(hw, cq->sq.len, val);
  246. }
  247. event.buf_len = cq->rq_buf_size;
  248. event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
  249. GFP_KERNEL);
  250. if (!event.msg_buf)
  251. return 0;
  252. do {
  253. enum ice_status ret;
  254. ret = ice_clean_rq_elem(hw, cq, &event, &pending);
  255. if (ret == ICE_ERR_AQ_NO_WORK)
  256. break;
  257. if (ret) {
  258. dev_err(&pf->pdev->dev,
  259. "%s Receive Queue event error %d\n", qtype,
  260. ret);
  261. break;
  262. }
  263. } while (pending && (i++ < ICE_DFLT_IRQ_WORK));
  264. devm_kfree(&pf->pdev->dev, event.msg_buf);
  265. return pending && (i == ICE_DFLT_IRQ_WORK);
  266. }
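/* Note: __ice_clean_ctrlq() returns nonzero only when receive-queue work is
 * still pending after ICE_DFLT_IRQ_WORK iterations; the caller below uses that
 * to leave __ICE_ADMINQ_EVENT_PENDING set so the service task runs again.
 */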
  267. /**
  268. * ice_clean_adminq_subtask - clean the AdminQ rings
  269. * @pf: board private structure
  270. */
  271. static void ice_clean_adminq_subtask(struct ice_pf *pf)
  272. {
  273. struct ice_hw *hw = &pf->hw;
  274. u32 val;
  275. if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
  276. return;
  277. if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
  278. return;
  279. clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
  280. /* re-enable Admin queue interrupt causes */
  281. val = rd32(hw, PFINT_FW_CTL);
  282. wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
  283. ice_flush(hw);
  284. }
  285. /**
  286. * ice_service_task_schedule - schedule the service task to wake up
  287. * @pf: board private structure
  288. *
  289. * If not already scheduled, this puts the task into the work queue.
  290. */
  291. static void ice_service_task_schedule(struct ice_pf *pf)
  292. {
  293. if (!test_bit(__ICE_DOWN, pf->state) &&
  294. !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state))
  295. queue_work(ice_wq, &pf->serv_task);
  296. }
  297. /**
  298. * ice_service_task_complete - finish up the service task
  299. * @pf: board private structure
  300. */
  301. static void ice_service_task_complete(struct ice_pf *pf)
  302. {
  303. WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
  304. /* force memory (pf->state) to sync before next service task */
  305. smp_mb__before_atomic();
  306. clear_bit(__ICE_SERVICE_SCHED, pf->state);
  307. }
  308. /**
  309. * ice_service_timer - timer callback to schedule service task
  310. * @t: pointer to timer_list
  311. */
  312. static void ice_service_timer(struct timer_list *t)
  313. {
  314. struct ice_pf *pf = from_timer(pf, t, serv_tmr);
  315. mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
  316. ice_service_task_schedule(pf);
  317. }
  318. /**
  319. * ice_service_task - manage and run subtasks
  320. * @work: pointer to work_struct contained by the PF struct
  321. */
  322. static void ice_service_task(struct work_struct *work)
  323. {
  324. struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
  325. unsigned long start_time = jiffies;
  326. /* subtasks */
  327. ice_clean_adminq_subtask(pf);
  328. /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
  329. ice_service_task_complete(pf);
  330. /* If the tasks have taken longer than one service timer period
  331. * or there is more work to be done, reset the service timer to
  332. * schedule the service task now.
  333. */
  334. if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
  335. test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
  336. mod_timer(&pf->serv_tmr, jiffies);
  337. }
  338. /**
  339. * ice_set_ctrlq_len - helper function to set controlq length
  340. * @hw: pointer to the hw instance
  341. */
  342. static void ice_set_ctrlq_len(struct ice_hw *hw)
  343. {
  344. hw->adminq.num_rq_entries = ICE_AQ_LEN;
  345. hw->adminq.num_sq_entries = ICE_AQ_LEN;
  346. hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
  347. hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
  348. }
  349. /**
  350. * ice_vsi_delete - delete a VSI from the switch
  351. * @vsi: pointer to VSI being removed
  352. */
  353. static void ice_vsi_delete(struct ice_vsi *vsi)
  354. {
  355. struct ice_pf *pf = vsi->back;
  356. struct ice_vsi_ctx ctxt;
  357. enum ice_status status;
  358. ctxt.vsi_num = vsi->vsi_num;
  359. memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
  360. status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
  361. if (status)
  362. dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
  363. vsi->vsi_num);
  364. }
  365. /**
  366. * ice_vsi_setup_q_map - Setup a VSI queue map
  367. * @vsi: the VSI being configured
  368. * @ctxt: VSI context structure
  369. */
  370. static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
  371. {
  372. u16 offset = 0, qmap = 0, pow = 0, qcount;
  373. u16 qcount_tx = vsi->alloc_txq;
  374. u16 qcount_rx = vsi->alloc_rxq;
  375. bool ena_tc0 = false;
  376. int i;
  377. /* at least TC0 should be enabled by default */
  378. if (vsi->tc_cfg.numtc) {
  379. if (!(vsi->tc_cfg.ena_tc & BIT(0)))
  380. ena_tc0 = true;
  381. } else {
  382. ena_tc0 = true;
  383. }
  384. if (ena_tc0) {
  385. vsi->tc_cfg.numtc++;
  386. vsi->tc_cfg.ena_tc |= 1;
  387. }
  388. qcount = qcount_rx / vsi->tc_cfg.numtc;
  389. /* find higher power-of-2 of qcount */
  390. pow = ilog2(qcount);
  391. if (!is_power_of_2(qcount))
  392. pow++;
  393. /* TC mapping is a function of the number of Rx queues assigned to the
  394. * VSI for each traffic class and the offset of these queues.
  395. * The first 10 bits hold the queue offset for TC0; the next 4 bits hold
  396. * the number of queues allocated to TC0, expressed as a power of 2.
  397. *
  398. * If a TC is not enabled, its queue offset is set to 0 and a single
  399. * queue is allocated, so traffic for that TC is sent to the default
  400. * queue.
  401. *
  402. * Setup number and offset of Rx queues for all TCs for the VSI
  403. */
  404. for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
  405. if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
  406. /* TC is not enabled */
  407. vsi->tc_cfg.tc_info[i].qoffset = 0;
  408. vsi->tc_cfg.tc_info[i].qcount = 1;
  409. ctxt->info.tc_mapping[i] = 0;
  410. continue;
  411. }
  412. /* TC is enabled */
  413. vsi->tc_cfg.tc_info[i].qoffset = offset;
  414. vsi->tc_cfg.tc_info[i].qcount = qcount;
  415. qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
  416. ICE_AQ_VSI_TC_Q_OFFSET_M) |
  417. ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
  418. ICE_AQ_VSI_TC_Q_NUM_M);
  419. offset += qcount;
  420. ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
  421. }
  422. vsi->num_txq = qcount_tx;
  423. vsi->num_rxq = offset;
  424. /* Rx queue mapping */
  425. ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
  426. /* q_mapping buffer holds the info for the first queue allocated for
  427. * this VSI in the PF space and also the number of queues associated
  428. * with this VSI.
  429. */
  430. ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
  431. ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
  432. }
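/* Worked example (illustrative only): for a PF VSI with alloc_rxq = 4 and no
 * TCs configured, TC0 is force-enabled, so qcount = 4 / 1 = 4 and
 * pow = ilog2(4) = 2. tc_mapping[0] then encodes queue offset 0 with 2^2 = 4
 * queues, every other TC gets offset 0 with a single queue, and num_rxq ends
 * up as 4.
 */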
  433. /**
  434. * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
  435. * @ctxt: the VSI context being set
  436. *
  437. * This initializes a default VSI context for all sections except the Queues.
  438. */
  439. static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
  440. {
  441. u32 table = 0;
  442. memset(&ctxt->info, 0, sizeof(ctxt->info));
  443. /* VSIs should be allocated from the shared pool */
  444. ctxt->alloc_from_pool = true;
  445. /* Src pruning enabled by default */
  446. ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
  447. /* Traffic from VSI can be sent to LAN */
  448. ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
  449. /* Allow all packets untagged/tagged */
  450. ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
  451. ICE_AQ_VSI_PVLAN_MODE_M) >>
  452. ICE_AQ_VSI_PVLAN_MODE_S);
  453. /* Show VLAN/UP from packets in Rx descriptors */
  454. ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
  455. ICE_AQ_VSI_PVLAN_EMOD_M) >>
  456. ICE_AQ_VSI_PVLAN_EMOD_S);
  457. /* Have 1:1 UP mapping for both ingress/egress tables */
  458. table |= ICE_UP_TABLE_TRANSLATE(0, 0);
  459. table |= ICE_UP_TABLE_TRANSLATE(1, 1);
  460. table |= ICE_UP_TABLE_TRANSLATE(2, 2);
  461. table |= ICE_UP_TABLE_TRANSLATE(3, 3);
  462. table |= ICE_UP_TABLE_TRANSLATE(4, 4);
  463. table |= ICE_UP_TABLE_TRANSLATE(5, 5);
  464. table |= ICE_UP_TABLE_TRANSLATE(6, 6);
  465. table |= ICE_UP_TABLE_TRANSLATE(7, 7);
  466. ctxt->info.ingress_table = cpu_to_le32(table);
  467. ctxt->info.egress_table = cpu_to_le32(table);
  468. /* Have 1:1 UP mapping for outer to inner UP table */
  469. ctxt->info.outer_up_table = cpu_to_le32(table);
  470. /* No outer tag support; outer_tag_flags remains zero */
  471. }
  472. /**
  473. * ice_vsi_add - Create a new VSI or fetch preallocated VSI
  474. * @vsi: the VSI being configured
  475. *
  476. * This initializes a VSI context depending on the VSI type to be added and
  477. * passes it down to the add_vsi aq command to create a new VSI.
  478. */
  479. static int ice_vsi_add(struct ice_vsi *vsi)
  480. {
  481. struct ice_vsi_ctx ctxt = { 0 };
  482. struct ice_pf *pf = vsi->back;
  483. struct ice_hw *hw = &pf->hw;
  484. int ret = 0;
  485. switch (vsi->type) {
  486. case ICE_VSI_PF:
  487. ctxt.flags = ICE_AQ_VSI_TYPE_PF;
  488. break;
  489. default:
  490. return -ENODEV;
  491. }
  492. ice_set_dflt_vsi_ctx(&ctxt);
  493. /* if the switch is in VEB mode, allow VSI loopback */
  494. if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
  495. ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
  496. ctxt.info.sw_id = vsi->port_info->sw_id;
  497. ice_vsi_setup_q_map(vsi, &ctxt);
  498. ret = ice_aq_add_vsi(hw, &ctxt, NULL);
  499. if (ret) {
  500. dev_err(&vsi->back->pdev->dev,
  501. "Add VSI AQ call failed, err %d\n", ret);
  502. return -EIO;
  503. }
  504. vsi->info = ctxt.info;
  505. vsi->vsi_num = ctxt.vsi_num;
  506. return ret;
  507. }
  508. /**
  509. * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
  510. * @vsi: the VSI having rings deallocated
  511. */
  512. static void ice_vsi_clear_rings(struct ice_vsi *vsi)
  513. {
  514. int i;
  515. if (vsi->tx_rings) {
  516. for (i = 0; i < vsi->alloc_txq; i++) {
  517. if (vsi->tx_rings[i]) {
  518. kfree_rcu(vsi->tx_rings[i], rcu);
  519. vsi->tx_rings[i] = NULL;
  520. }
  521. }
  522. }
  523. if (vsi->rx_rings) {
  524. for (i = 0; i < vsi->alloc_rxq; i++) {
  525. if (vsi->rx_rings[i]) {
  526. kfree_rcu(vsi->rx_rings[i], rcu);
  527. vsi->rx_rings[i] = NULL;
  528. }
  529. }
  530. }
  531. }
  532. /**
  533. * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
  534. * @vsi: VSI which is having rings allocated
  535. */
  536. static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
  537. {
  538. struct ice_pf *pf = vsi->back;
  539. int i;
  540. /* Allocate tx_rings */
  541. for (i = 0; i < vsi->alloc_txq; i++) {
  542. struct ice_ring *ring;
  543. /* allocate with kzalloc(), free with kfree_rcu() */
  544. ring = kzalloc(sizeof(*ring), GFP_KERNEL);
  545. if (!ring)
  546. goto err_out;
  547. ring->q_index = i;
  548. ring->reg_idx = vsi->txq_map[i];
  549. ring->ring_active = false;
  550. ring->vsi = vsi;
  551. ring->netdev = vsi->netdev;
  552. ring->dev = &pf->pdev->dev;
  553. ring->count = vsi->num_desc;
  554. vsi->tx_rings[i] = ring;
  555. }
  556. /* Allocate rx_rings */
  557. for (i = 0; i < vsi->alloc_rxq; i++) {
  558. struct ice_ring *ring;
  559. /* allocate with kzalloc(), free with kfree_rcu() */
  560. ring = kzalloc(sizeof(*ring), GFP_KERNEL);
  561. if (!ring)
  562. goto err_out;
  563. ring->q_index = i;
  564. ring->reg_idx = vsi->rxq_map[i];
  565. ring->ring_active = false;
  566. ring->vsi = vsi;
  567. ring->netdev = vsi->netdev;
  568. ring->dev = &pf->pdev->dev;
  569. ring->count = vsi->num_desc;
  570. vsi->rx_rings[i] = ring;
  571. }
  572. return 0;
  573. err_out:
  574. ice_vsi_clear_rings(vsi);
  575. return -ENOMEM;
  576. }
  577. /**
  578. * ice_ena_misc_vector - enable the non-queue interrupts
  579. * @pf: board private structure
  580. */
  581. static void ice_ena_misc_vector(struct ice_pf *pf)
  582. {
  583. struct ice_hw *hw = &pf->hw;
  584. u32 val;
  585. /* clear things first */
  586. wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
  587. rd32(hw, PFINT_OICR); /* read to clear */
  588. val = (PFINT_OICR_HLP_RDY_M |
  589. PFINT_OICR_CPM_RDY_M |
  590. PFINT_OICR_ECC_ERR_M |
  591. PFINT_OICR_MAL_DETECT_M |
  592. PFINT_OICR_GRST_M |
  593. PFINT_OICR_PCI_EXCEPTION_M |
  594. PFINT_OICR_GPIO_M |
  595. PFINT_OICR_STORM_DETECT_M |
  596. PFINT_OICR_HMC_ERR_M);
  597. wr32(hw, PFINT_OICR_ENA, val);
  598. /* SW_ITR_IDX = 0, but don't change INTENA */
  599. wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
  600. GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
  601. }
  602. /**
  603. * ice_misc_intr - misc interrupt handler
  604. * @irq: interrupt number
  605. * @data: pointer to the PF structure passed as the IRQ cookie
  606. */
  607. static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
  608. {
  609. struct ice_pf *pf = (struct ice_pf *)data;
  610. struct ice_hw *hw = &pf->hw;
  611. irqreturn_t ret = IRQ_NONE;
  612. u32 oicr, ena_mask;
  613. set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
  614. oicr = rd32(hw, PFINT_OICR);
  615. ena_mask = rd32(hw, PFINT_OICR_ENA);
  616. if (!(oicr & PFINT_OICR_INTEVENT_M))
  617. goto ena_intr;
  618. if (oicr & PFINT_OICR_HMC_ERR_M) {
  619. ena_mask &= ~PFINT_OICR_HMC_ERR_M;
  620. dev_dbg(&pf->pdev->dev,
  621. "HMC Error interrupt - info 0x%x, data 0x%x\n",
  622. rd32(hw, PFHMC_ERRORINFO),
  623. rd32(hw, PFHMC_ERRORDATA));
  624. }
  625. /* Report and mask off any remaining unexpected interrupts */
  626. oicr &= ena_mask;
  627. if (oicr) {
  628. dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
  629. oicr);
  630. /* If a critical error is pending there is no choice but to
  631. * reset the device.
  632. */
  633. if (oicr & (PFINT_OICR_PE_CRITERR_M |
  634. PFINT_OICR_PCI_EXCEPTION_M |
  635. PFINT_OICR_ECC_ERR_M))
  636. set_bit(__ICE_PFR_REQ, pf->state);
  637. ena_mask &= ~oicr;
  638. }
  639. ret = IRQ_HANDLED;
  640. ena_intr:
  641. /* re-enable interrupt causes that are not handled during this pass */
  642. wr32(hw, PFINT_OICR_ENA, ena_mask);
  643. if (!test_bit(__ICE_DOWN, pf->state)) {
  644. ice_service_task_schedule(pf);
  645. ice_irq_dynamic_ena(hw);
  646. }
  647. return ret;
  648. }
  649. /**
  650. * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
  651. * @vsi: the VSI being configured
  652. *
  653. * This function maps descriptor rings to the queue-specific vectors allotted
  654. * through the MSI-X enabling code. On a constrained vector budget, we map Tx
  655. * and Rx rings to the vector as "efficiently" as possible.
  656. */
  657. static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
  658. {
  659. int q_vectors = vsi->num_q_vectors;
  660. int tx_rings_rem, rx_rings_rem;
  661. int v_id;
  662. /* start with all rings unassigned: the remaining counts equal the VSI's queue counts */
  663. tx_rings_rem = vsi->num_txq;
  664. rx_rings_rem = vsi->num_rxq;
  665. for (v_id = 0; v_id < q_vectors; v_id++) {
  666. struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
  667. int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
  668. /* Tx rings mapping to vector */
  669. tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
  670. q_vector->num_ring_tx = tx_rings_per_v;
  671. q_vector->tx.ring = NULL;
  672. q_base = vsi->num_txq - tx_rings_rem;
  673. for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
  674. struct ice_ring *tx_ring = vsi->tx_rings[q_id];
  675. tx_ring->q_vector = q_vector;
  676. tx_ring->next = q_vector->tx.ring;
  677. q_vector->tx.ring = tx_ring;
  678. }
  679. tx_rings_rem -= tx_rings_per_v;
  680. /* Rx rings mapping to vector */
  681. rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
  682. q_vector->num_ring_rx = rx_rings_per_v;
  683. q_vector->rx.ring = NULL;
  684. q_base = vsi->num_rxq - rx_rings_rem;
  685. for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
  686. struct ice_ring *rx_ring = vsi->rx_rings[q_id];
  687. rx_ring->q_vector = q_vector;
  688. rx_ring->next = q_vector->rx.ring;
  689. q_vector->rx.ring = rx_ring;
  690. }
  691. rx_rings_rem -= rx_rings_per_v;
  692. }
  693. }
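/* Worked example (illustrative only): with num_txq = 5 and num_q_vectors = 3,
 * DIV_ROUND_UP() spreads the Tx rings as 2, 2 and 1: vector 0 gets rings 0-1,
 * vector 1 gets rings 2-3 and vector 2 gets ring 4. Rx rings are distributed
 * the same way.
 */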
  694. /**
  695. * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
  696. * @vsi: the VSI being configured
  697. *
  698. * Determines the queue, descriptor and interrupt vector counts for the given VSI type.
  699. */
  700. static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
  701. {
  702. struct ice_pf *pf = vsi->back;
  703. switch (vsi->type) {
  704. case ICE_VSI_PF:
  705. vsi->alloc_txq = pf->num_lan_tx;
  706. vsi->alloc_rxq = pf->num_lan_rx;
  707. vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
  708. vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
  709. break;
  710. default:
  711. dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
  712. vsi->type);
  713. break;
  714. }
  715. }
  716. /**
  717. * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
  718. * @vsi: VSI pointer
  719. * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
  720. *
  721. * On error: returns error code (negative)
  722. * On success: returns 0
  723. */
  724. static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
  725. {
  726. struct ice_pf *pf = vsi->back;
  727. /* allocate memory for both Tx and Rx ring pointers */
  728. vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
  729. sizeof(struct ice_ring *), GFP_KERNEL);
  730. if (!vsi->tx_rings)
  731. goto err_txrings;
  732. vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
  733. sizeof(struct ice_ring *), GFP_KERNEL);
  734. if (!vsi->rx_rings)
  735. goto err_rxrings;
  736. if (alloc_qvectors) {
  737. /* allocate memory for q_vector pointers */
  738. vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
  739. vsi->num_q_vectors,
  740. sizeof(struct ice_q_vector *),
  741. GFP_KERNEL);
  742. if (!vsi->q_vectors)
  743. goto err_vectors;
  744. }
  745. return 0;
  746. err_vectors:
  747. devm_kfree(&pf->pdev->dev, vsi->rx_rings);
  748. err_rxrings:
  749. devm_kfree(&pf->pdev->dev, vsi->tx_rings);
  750. err_txrings:
  751. return -ENOMEM;
  752. }
  753. /**
  754. * ice_vsi_alloc - Allocates the next available struct vsi in the PF
  755. * @pf: board private structure
  756. * @type: type of VSI
  757. *
  758. * returns a pointer to a VSI on success, NULL on failure.
  759. */
  760. static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
  761. {
  762. struct ice_vsi *vsi = NULL;
  763. /* Need to protect the allocation of the VSIs at the PF level */
  764. mutex_lock(&pf->sw_mutex);
  765. /* If we have already allocated our maximum number of VSIs,
  766. * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
  767. * is available to be populated
  768. */
  769. if (pf->next_vsi == ICE_NO_VSI) {
  770. dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
  771. goto unlock_pf;
  772. }
  773. vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
  774. if (!vsi)
  775. goto unlock_pf;
  776. vsi->type = type;
  777. vsi->back = pf;
  778. set_bit(__ICE_DOWN, vsi->state);
  779. vsi->idx = pf->next_vsi;
  780. vsi->work_lmt = ICE_DFLT_IRQ_WORK;
  781. ice_vsi_set_num_qs(vsi);
  782. switch (vsi->type) {
  783. case ICE_VSI_PF:
  784. if (ice_vsi_alloc_arrays(vsi, true))
  785. goto err_rings;
  786. break;
  787. default:
  788. dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
  789. goto unlock_pf;
  790. }
  791. /* fill VSI slot in the PF struct */
  792. pf->vsi[pf->next_vsi] = vsi;
  793. /* prepare pf->next_vsi for next use */
  794. pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
  795. pf->next_vsi);
  796. goto unlock_pf;
  797. err_rings:
  798. devm_kfree(&pf->pdev->dev, vsi);
  799. vsi = NULL;
  800. unlock_pf:
  801. mutex_unlock(&pf->sw_mutex);
  802. return vsi;
  803. }
  804. /**
  805. * ice_free_irq_msix_misc - Unroll misc vector setup
  806. * @pf: board private structure
  807. */
  808. static void ice_free_irq_msix_misc(struct ice_pf *pf)
  809. {
  810. /* disable OICR interrupt */
  811. wr32(&pf->hw, PFINT_OICR_ENA, 0);
  812. ice_flush(&pf->hw);
  813. if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
  814. synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
  815. devm_free_irq(&pf->pdev->dev,
  816. pf->msix_entries[pf->oicr_idx].vector, pf);
  817. }
  818. ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
  819. }
  820. /**
  821. * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
  822. * @pf: board private structure
  823. *
  824. * This sets up the handler for MSIX 0, which is used to manage the
  825. * non-queue interrupts, e.g. AdminQ and errors. This is not used
  826. * when in MSI or Legacy interrupt mode.
  827. */
  828. static int ice_req_irq_msix_misc(struct ice_pf *pf)
  829. {
  830. struct ice_hw *hw = &pf->hw;
  831. int oicr_idx, err = 0;
  832. u8 itr_gran;
  833. u32 val;
  834. if (!pf->int_name[0])
  835. snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
  836. dev_driver_string(&pf->pdev->dev),
  837. dev_name(&pf->pdev->dev));
  838. /* reserve one vector in irq_tracker for misc interrupts */
  839. oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
  840. if (oicr_idx < 0)
  841. return oicr_idx;
  842. pf->oicr_idx = oicr_idx;
  843. err = devm_request_irq(&pf->pdev->dev,
  844. pf->msix_entries[pf->oicr_idx].vector,
  845. ice_misc_intr, 0, pf->int_name, pf);
  846. if (err) {
  847. dev_err(&pf->pdev->dev,
  848. "devm_request_irq for %s failed: %d\n",
  849. pf->int_name, err);
  850. ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
  851. return err;
  852. }
  853. ice_ena_misc_vector(pf);
  854. val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
  855. (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
  856. PFINT_OICR_CTL_CAUSE_ENA_M;
  857. wr32(hw, PFINT_OICR_CTL, val);
  858. /* This enables Admin queue Interrupt causes */
  859. val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
  860. (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
  861. PFINT_FW_CTL_CAUSE_ENA_M;
  862. wr32(hw, PFINT_FW_CTL, val);
  863. itr_gran = hw->itr_gran_200;
  864. wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
  865. ITR_TO_REG(ICE_ITR_8K, itr_gran));
  866. ice_flush(hw);
  867. ice_irq_dynamic_ena(hw);
  868. return 0;
  869. }
  870. /**
  871. * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
  872. * @vsi: the VSI getting queues
  873. *
  874. * Return 0 on success and a negative value on error
  875. */
  876. static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
  877. {
  878. struct ice_pf *pf = vsi->back;
  879. int offset, ret = 0;
  880. mutex_lock(&pf->avail_q_mutex);
  881. /* look for contiguous block of queues for tx */
  882. offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
  883. 0, vsi->alloc_txq, 0);
  884. if (offset < ICE_MAX_TXQS) {
  885. int i;
  886. bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
  887. for (i = 0; i < vsi->alloc_txq; i++)
  888. vsi->txq_map[i] = i + offset;
  889. } else {
  890. ret = -ENOMEM;
  891. vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
  892. }
  893. /* look for contiguous block of queues for rx */
  894. offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
  895. 0, vsi->alloc_rxq, 0);
  896. if (offset < ICE_MAX_RXQS) {
  897. int i;
  898. bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
  899. for (i = 0; i < vsi->alloc_rxq; i++)
  900. vsi->rxq_map[i] = i + offset;
  901. } else {
  902. ret = -ENOMEM;
  903. vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
  904. }
  905. mutex_unlock(&pf->avail_q_mutex);
  906. return ret;
  907. }
  908. /**
  909. * ice_vsi_get_qs_scatter - Assign scattered queues to a VSI
  910. * @vsi: the VSI getting queues
  911. *
  912. * Return 0 on success and a negative value on error
  913. */
  914. static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
  915. {
  916. struct ice_pf *pf = vsi->back;
  917. int i, index = 0;
  918. mutex_lock(&pf->avail_q_mutex);
  919. if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
  920. for (i = 0; i < vsi->alloc_txq; i++) {
  921. index = find_next_zero_bit(pf->avail_txqs,
  922. ICE_MAX_TXQS, index);
  923. if (index < ICE_MAX_TXQS) {
  924. set_bit(index, pf->avail_txqs);
  925. vsi->txq_map[i] = index;
  926. } else {
  927. goto err_scatter_tx;
  928. }
  929. }
  930. }
  931. if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
  932. for (i = 0; i < vsi->alloc_rxq; i++) {
  933. index = find_next_zero_bit(pf->avail_rxqs,
  934. ICE_MAX_RXQS, index);
  935. if (index < ICE_MAX_RXQS) {
  936. set_bit(index, pf->avail_rxqs);
  937. vsi->rxq_map[i] = index;
  938. } else {
  939. goto err_scatter_rx;
  940. }
  941. }
  942. }
  943. mutex_unlock(&pf->avail_q_mutex);
  944. return 0;
  945. err_scatter_rx:
  946. /* unflag any queues we have grabbed (i is failed position) */
  947. for (index = 0; index < i; index++) {
  948. clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
  949. vsi->rxq_map[index] = 0;
  950. }
  951. i = vsi->alloc_txq;
  952. err_scatter_tx:
  953. /* i is either position of failed attempt or vsi->alloc_txq */
  954. for (index = 0; index < i; index++) {
  955. clear_bit(vsi->txq_map[index], pf->avail_txqs);
  956. vsi->txq_map[index] = 0;
  957. }
  958. mutex_unlock(&pf->avail_q_mutex);
  959. return -ENOMEM;
  960. }
  961. /**
  962. * ice_vsi_get_qs - Assign queues from PF to VSI
  963. * @vsi: the VSI to assign queues to
  964. *
  965. * Returns 0 on success and a negative value on error
  966. */
  967. static int ice_vsi_get_qs(struct ice_vsi *vsi)
  968. {
  969. int ret = 0;
  970. vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
  971. vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
  972. /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping
  973. * modes individually to scatter if assigning contiguous queues
  974. * to rx or tx fails
  975. */
  976. ret = ice_vsi_get_qs_contig(vsi);
  977. if (ret < 0) {
  978. if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
  979. vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
  980. ICE_MAX_SCATTER_TXQS);
  981. if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
  982. vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
  983. ICE_MAX_SCATTER_RXQS);
  984. ret = ice_vsi_get_qs_scatter(vsi);
  985. }
  986. return ret;
  987. }
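/* Example (illustrative only): if pf->avail_txqs has no contiguous run of
 * alloc_txq free bits, ice_vsi_get_qs_contig() flags Tx as ICE_VSI_MAP_SCATTER
 * and returns -ENOMEM; ice_vsi_get_qs() then retries with
 * ice_vsi_get_qs_scatter(), which claims individual free bits instead.
 */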
  988. /**
  989. * ice_vsi_put_qs - Release queues from VSI to PF
  990. * @vsi: the VSI thats going to release queues
  991. */
  992. static void ice_vsi_put_qs(struct ice_vsi *vsi)
  993. {
  994. struct ice_pf *pf = vsi->back;
  995. int i;
  996. mutex_lock(&pf->avail_q_mutex);
  997. for (i = 0; i < vsi->alloc_txq; i++) {
  998. clear_bit(vsi->txq_map[i], pf->avail_txqs);
  999. vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
  1000. }
  1001. for (i = 0; i < vsi->alloc_rxq; i++) {
  1002. clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
  1003. vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
  1004. }
  1005. mutex_unlock(&pf->avail_q_mutex);
  1006. }
  1007. /**
  1008. * ice_free_q_vector - Free memory allocated for a specific interrupt vector
  1009. * @vsi: VSI having the memory freed
  1010. * @v_idx: index of the vector to be freed
  1011. */
  1012. static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
  1013. {
  1014. struct ice_q_vector *q_vector;
  1015. struct ice_ring *ring;
  1016. if (!vsi->q_vectors[v_idx]) {
  1017. dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
  1018. v_idx);
  1019. return;
  1020. }
  1021. q_vector = vsi->q_vectors[v_idx];
  1022. ice_for_each_ring(ring, q_vector->tx)
  1023. ring->q_vector = NULL;
  1024. ice_for_each_ring(ring, q_vector->rx)
  1025. ring->q_vector = NULL;
  1026. /* only VSI with an associated netdev is set up with NAPI */
  1027. if (vsi->netdev)
  1028. netif_napi_del(&q_vector->napi);
  1029. devm_kfree(&vsi->back->pdev->dev, q_vector);
  1030. vsi->q_vectors[v_idx] = NULL;
  1031. }
  1032. /**
  1033. * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
  1034. * @vsi: the VSI having memory freed
  1035. */
  1036. static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
  1037. {
  1038. int v_idx;
  1039. for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
  1040. ice_free_q_vector(vsi, v_idx);
  1041. }
  1042. /**
  1043. * ice_cfg_netdev - Setup the netdev flags
  1044. * @vsi: the VSI being configured
  1045. *
  1046. * Returns 0 on success, negative value on failure
  1047. */
  1048. static int ice_cfg_netdev(struct ice_vsi *vsi)
  1049. {
  1050. struct ice_netdev_priv *np;
  1051. struct net_device *netdev;
  1052. u8 mac_addr[ETH_ALEN];
  1053. netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
  1054. vsi->alloc_txq, vsi->alloc_rxq);
  1055. if (!netdev)
  1056. return -ENOMEM;
  1057. vsi->netdev = netdev;
  1058. np = netdev_priv(netdev);
  1059. np->vsi = vsi;
  1060. /* set features that user can change */
  1061. netdev->hw_features = NETIF_F_SG |
  1062. NETIF_F_HIGHDMA |
  1063. NETIF_F_RXHASH;
  1064. /* enable features */
  1065. netdev->features |= netdev->hw_features;
  1066. if (vsi->type == ICE_VSI_PF) {
  1067. SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
  1068. ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
  1069. ether_addr_copy(netdev->dev_addr, mac_addr);
  1070. ether_addr_copy(netdev->perm_addr, mac_addr);
  1071. }
  1072. netdev->priv_flags |= IFF_UNICAST_FLT;
  1073. /* set the watchdog timeout to 5 seconds */
  1074. netdev->watchdog_timeo = 5 * HZ;
  1075. netdev->min_mtu = ETH_MIN_MTU;
  1076. netdev->max_mtu = ICE_MAX_MTU;
  1077. return 0;
  1078. }
  1079. /**
  1080. * ice_vsi_free_arrays - clean up vsi resources
  1081. * @vsi: pointer to VSI being cleared
  1082. * @free_qvectors: bool to specify if q_vectors should be deallocated
  1083. */
  1084. static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
  1085. {
  1086. struct ice_pf *pf = vsi->back;
  1087. /* free the ring and vector containers */
  1088. if (free_qvectors && vsi->q_vectors) {
  1089. devm_kfree(&pf->pdev->dev, vsi->q_vectors);
  1090. vsi->q_vectors = NULL;
  1091. }
  1092. if (vsi->tx_rings) {
  1093. devm_kfree(&pf->pdev->dev, vsi->tx_rings);
  1094. vsi->tx_rings = NULL;
  1095. }
  1096. if (vsi->rx_rings) {
  1097. devm_kfree(&pf->pdev->dev, vsi->rx_rings);
  1098. vsi->rx_rings = NULL;
  1099. }
  1100. }
  1101. /**
  1102. * ice_vsi_clear - clean up and deallocate the provided vsi
  1103. * @vsi: pointer to VSI being cleared
  1104. *
  1105. * This deallocates the vsi's queue resources, removes it from the PF's
  1106. * VSI array if necessary, and deallocates the VSI
  1107. *
  1108. * Returns 0 on success, negative on failure
  1109. */
  1110. static int ice_vsi_clear(struct ice_vsi *vsi)
  1111. {
  1112. struct ice_pf *pf = NULL;
  1113. if (!vsi)
  1114. return 0;
  1115. if (!vsi->back)
  1116. return -EINVAL;
  1117. pf = vsi->back;
  1118. if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
  1119. dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
  1120. vsi->idx);
  1121. return -EINVAL;
  1122. }
  1123. mutex_lock(&pf->sw_mutex);
  1124. /* updates the PF for this cleared vsi */
  1125. pf->vsi[vsi->idx] = NULL;
  1126. if (vsi->idx < pf->next_vsi)
  1127. pf->next_vsi = vsi->idx;
  1128. ice_vsi_free_arrays(vsi, true);
  1129. mutex_unlock(&pf->sw_mutex);
  1130. devm_kfree(&pf->pdev->dev, vsi);
  1131. return 0;
  1132. }
  1133. /**
  1134. * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
  1135. * @vsi: the VSI being configured
  1136. * @v_idx: index of the vector in the vsi struct
  1137. *
  1138. * We allocate one q_vector. If allocation fails we return -ENOMEM.
  1139. */
  1140. static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
  1141. {
  1142. struct ice_pf *pf = vsi->back;
  1143. struct ice_q_vector *q_vector;
  1144. /* allocate q_vector */
  1145. q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
  1146. if (!q_vector)
  1147. return -ENOMEM;
  1148. q_vector->vsi = vsi;
  1149. q_vector->v_idx = v_idx;
  1150. /* only set affinity_mask if the CPU is online */
  1151. if (cpu_online(v_idx))
  1152. cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
  1153. /* tie q_vector and vsi together */
  1154. vsi->q_vectors[v_idx] = q_vector;
  1155. return 0;
  1156. }
  1157. /**
  1158. * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
  1159. * @vsi: the VSI being configured
  1160. *
  1161. * We allocate one q_vector per queue interrupt. If allocation fails we
  1162. * return -ENOMEM.
  1163. */
  1164. static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
  1165. {
  1166. struct ice_pf *pf = vsi->back;
  1167. int v_idx = 0, num_q_vectors;
  1168. int err;
  1169. if (vsi->q_vectors[0]) {
  1170. dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
  1171. vsi->vsi_num);
  1172. return -EEXIST;
  1173. }
  1174. if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
  1175. num_q_vectors = vsi->num_q_vectors;
  1176. } else {
  1177. err = -EINVAL;
  1178. goto err_out;
  1179. }
  1180. for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
  1181. err = ice_vsi_alloc_q_vector(vsi, v_idx);
  1182. if (err)
  1183. goto err_out;
  1184. }
  1185. return 0;
  1186. err_out:
  1187. while (v_idx--)
  1188. ice_free_q_vector(vsi, v_idx);
  1189. dev_err(&pf->pdev->dev,
  1190. "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
  1191. vsi->num_q_vectors, vsi->vsi_num, err);
  1192. vsi->num_q_vectors = 0;
  1193. return err;
  1194. }
  1195. /**
  1196. * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
  1197. * @vsi: ptr to the VSI
  1198. *
  1199. * This should only be called after ice_vsi_alloc() which allocates the
  1200. * corresponding SW VSI structure and initializes num_queue_pairs for the
  1201. * newly allocated VSI.
  1202. *
  1203. * Returns 0 on success or negative on failure
  1204. */
  1205. static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
  1206. {
  1207. struct ice_pf *pf = vsi->back;
  1208. int num_q_vectors = 0;
  1209. if (vsi->base_vector) {
  1210. dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
  1211. vsi->vsi_num, vsi->base_vector);
  1212. return -EEXIST;
  1213. }
  1214. if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
  1215. return -ENOENT;
  1216. switch (vsi->type) {
  1217. case ICE_VSI_PF:
  1218. num_q_vectors = vsi->num_q_vectors;
  1219. break;
  1220. default:
  1221. dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
  1222. vsi->type);
  1223. break;
  1224. }
  1225. if (num_q_vectors)
  1226. vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
  1227. num_q_vectors, vsi->idx);
  1228. if (vsi->base_vector < 0) {
  1229. dev_err(&pf->pdev->dev,
  1230. "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
  1231. num_q_vectors, vsi->vsi_num, vsi->base_vector);
  1232. return -ENOENT;
  1233. }
  1234. return 0;
  1235. }
  1236. /**
  1237. * ice_vsi_setup - Set up a VSI by a given type
  1238. * @pf: board private structure
  1239. * @type: VSI type
  1240. * @pi: pointer to the port_info instance
  1241. *
  1242. * This allocates the sw VSI structure and its queue resources.
  1243. *
  1244. * Returns a pointer to the successfully allocated and configured VSI sw struct
  1245. * on success, otherwise returns NULL on failure.
  1246. */
  1247. static struct ice_vsi *
  1248. ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type,
  1249. struct ice_port_info *pi)
  1250. {
  1251. struct device *dev = &pf->pdev->dev;
  1252. struct ice_vsi_ctx ctxt = { 0 };
  1253. struct ice_vsi *vsi;
  1254. int ret;
  1255. vsi = ice_vsi_alloc(pf, type);
  1256. if (!vsi) {
  1257. dev_err(dev, "could not allocate VSI\n");
  1258. return NULL;
  1259. }
  1260. vsi->port_info = pi;
  1261. vsi->vsw = pf->first_sw;
  1262. if (ice_vsi_get_qs(vsi)) {
  1263. dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
  1264. vsi->idx);
  1265. goto err_get_qs;
  1266. }
  1267. /* create the VSI */
  1268. ret = ice_vsi_add(vsi);
  1269. if (ret)
  1270. goto err_vsi;
  1271. ctxt.vsi_num = vsi->vsi_num;
  1272. switch (vsi->type) {
  1273. case ICE_VSI_PF:
  1274. ret = ice_cfg_netdev(vsi);
  1275. if (ret)
  1276. goto err_cfg_netdev;
  1277. ret = register_netdev(vsi->netdev);
  1278. if (ret)
  1279. goto err_register_netdev;
  1280. netif_carrier_off(vsi->netdev);
  1281. /* make sure transmit queues start off as stopped */
  1282. netif_tx_stop_all_queues(vsi->netdev);
  1283. ret = ice_vsi_alloc_q_vectors(vsi);
  1284. if (ret)
  1285. goto err_msix;
  1286. ret = ice_vsi_setup_vector_base(vsi);
  1287. if (ret)
  1288. goto err_rings;
  1289. ret = ice_vsi_alloc_rings(vsi);
  1290. if (ret)
  1291. goto err_rings;
  1292. ice_vsi_map_rings_to_vectors(vsi);
  1293. break;
  1294. default:
  1295. /* if vsi type is not recognized, clean up the resources and
  1296. * exit
  1297. */
  1298. goto err_rings;
  1299. }
  1300. return vsi;
  1301. err_rings:
  1302. ice_vsi_free_q_vectors(vsi);
  1303. err_msix:
  1304. if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
  1305. unregister_netdev(vsi->netdev);
  1306. err_register_netdev:
  1307. if (vsi->netdev) {
  1308. free_netdev(vsi->netdev);
  1309. vsi->netdev = NULL;
  1310. }
  1311. err_cfg_netdev:
  1312. ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
  1313. if (ret)
  1314. dev_err(&vsi->back->pdev->dev,
  1315. "Free VSI AQ call failed, err %d\n", ret);
  1316. err_vsi:
  1317. ice_vsi_put_qs(vsi);
  1318. err_get_qs:
  1319. pf->q_left_tx += vsi->alloc_txq;
  1320. pf->q_left_rx += vsi->alloc_rxq;
  1321. ice_vsi_clear(vsi);
  1322. return NULL;
  1323. }
  1324. /**
  1325. * ice_setup_pf_sw - Setup the HW switch on startup or after reset
  1326. * @pf: board private structure
  1327. *
  1328. * Returns 0 on success, negative value on failure
  1329. */
  1330. static int ice_setup_pf_sw(struct ice_pf *pf)
  1331. {
  1332. LIST_HEAD(tmp_add_list);
  1333. u8 broadcast[ETH_ALEN];
  1334. struct ice_vsi *vsi;
  1335. int status = 0;
  1336. vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info);
  1337. if (!vsi) {
  1338. status = -ENOMEM;
  1339. goto error_exit;
  1340. }
  1341. /* tmp_add_list contains a list of MAC addresses for which MAC
  1342. * filters need to be programmed. Add the VSI's unicast MAC to
  1343. * this list
  1344. */
  1345. status = ice_add_mac_to_list(vsi, &tmp_add_list,
  1346. vsi->port_info->mac.perm_addr);
  1347. if (status)
  1348. goto error_exit;
  1349. /* VSI needs to receive broadcast traffic, so add the broadcast
  1350. * MAC address to the list.
  1351. */
  1352. eth_broadcast_addr(broadcast);
  1353. status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
  1354. if (status)
  1355. goto error_exit;
  1356. /* program MAC filters for entries in tmp_add_list */
  1357. status = ice_add_mac(&pf->hw, &tmp_add_list);
  1358. if (status) {
  1359. dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
  1360. status = -ENOMEM;
  1361. goto error_exit;
  1362. }
  1363. ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
  1364. return status;
  1365. error_exit:
  1366. ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
  1367. if (vsi) {
  1368. ice_vsi_free_q_vectors(vsi);
  1369. if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
  1370. unregister_netdev(vsi->netdev);
  1371. if (vsi->netdev) {
  1372. free_netdev(vsi->netdev);
  1373. vsi->netdev = NULL;
  1374. }
  1375. ice_vsi_delete(vsi);
  1376. ice_vsi_put_qs(vsi);
  1377. pf->q_left_tx += vsi->alloc_txq;
  1378. pf->q_left_rx += vsi->alloc_rxq;
  1379. ice_vsi_clear(vsi);
  1380. }
  1381. return status;
  1382. }
  1383. /**
  1384. * ice_determine_q_usage - Calculate queue distribution
  1385. * @pf: board private structure
  1386. *
  1387. * Records how many Tx/Rx queues are reserved for LAN use and how many remain available.
  1388. */
  1389. static void ice_determine_q_usage(struct ice_pf *pf)
  1390. {
  1391. u16 q_left_tx, q_left_rx;
  1392. q_left_tx = pf->hw.func_caps.common_cap.num_txq;
  1393. q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
  1394. /* initial support for only 1 tx and 1 rx queue */
  1395. pf->num_lan_tx = 1;
  1396. pf->num_lan_rx = 1;
  1397. pf->q_left_tx = q_left_tx - pf->num_lan_tx;
  1398. pf->q_left_rx = q_left_rx - pf->num_lan_rx;
  1399. }

/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to de-initialize
 */
static void ice_deinit_pf(struct ice_pf *pf)
{
	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->avail_q_mutex);
}

/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */
static void ice_init_pf(struct ice_pf *pf)
{
	bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
	set_bit(ICE_FLAG_MSIX_ENA, pf->flags);

	mutex_init(&pf->sw_mutex);
	mutex_init(&pf->avail_q_mutex);

	/* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
	mutex_lock(&pf->avail_q_mutex);
	bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
	bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
	mutex_unlock(&pf->avail_q_mutex);

	/* setup service timer and periodic service task */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	INIT_WORK(&pf->serv_task, ice_service_task);
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}
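
/* The service timer set up here is first armed once probe succeeds, e.g.:
 *
 *	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
 *
 * With serv_tmr_period == HZ this fires roughly once per second;
 * ice_service_timer() and ice_service_task() (defined elsewhere in this
 * file) are expected to schedule the periodic work and re-arm the timer.
 */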

/**
 * ice_ena_msix_range - Request a range of MSI-X vectors from the OS
 * @pf: board private structure
 *
 * Compute the number of MSI-X vectors required (v_budget) and request that
 * many from the OS. Return the number of vectors reserved or negative on
 * failure.
 */
static int ice_ena_msix_range(struct ice_pf *pf)
{
	int v_left, v_actual, v_budget = 0;
	int needed, err, i;

	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;

	/* reserve one vector for miscellaneous handler */
	needed = 1;
	v_budget += needed;
	v_left -= needed;

	/* reserve vectors for LAN traffic */
	pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
	v_budget += pf->num_lan_msix;

	pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!pf->msix_entries) {
		err = -ENOMEM;
		goto exit_err;
	}

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;

	/* actually reserve the vectors */
	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					 ICE_MIN_MSIX, v_budget);
	if (v_actual < 0) {
		dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
		err = v_actual;
		goto msix_err;
	}

	if (v_actual < v_budget) {
		dev_warn(&pf->pdev->dev,
			 "not enough vectors. requested = %d, obtained = %d\n",
			 v_budget, v_actual);
		if (v_actual >= (pf->num_lan_msix + 1)) {
			pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
		} else if (v_actual >= 2) {
			pf->num_lan_msix = 1;
			pf->num_avail_msix = v_actual - 2;
		} else {
			pci_disable_msix(pf->pdev);
			err = -ERANGE;
			goto msix_err;
		}
	}

	return v_actual;

msix_err:
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	goto exit_err;

exit_err:
	pf->num_lan_msix = 0;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
	return err;
}
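
/* Worked example (illustrative numbers): on a 4-CPU system with enough
 * vectors left, v_budget = 1 (misc) + 4 (LAN) = 5.  If the OS grants only
 * 3 of them, 3 < num_lan_msix + 1 (5), so the fallback path sets
 * num_lan_msix = 1 and num_avail_msix = 3 - 2 = 1.  If the grant drops
 * below two vectors, the code disables MSI-X and returns -ERANGE.
 */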

/**
 * ice_dis_msix - Disable MSI-X interrupt setup in OS
 * @pf: board private structure
 */
static void ice_dis_msix(struct ice_pf *pf)
{
	pci_disable_msix(pf->pdev);
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	pf->msix_entries = NULL;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}

/**
 * ice_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 */
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		vectors = ice_ena_msix_range(pf);
	else
		return -ENODEV;

	if (vectors < 0)
		return vectors;

	/* set up vector assignment tracking */
	size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
	pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
	if (!pf->irq_tracker) {
		ice_dis_msix(pf);
		return -ENOMEM;
	}

	pf->irq_tracker->num_entries = vectors;

	return 0;
}
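
/* The tracker is a single allocation sized for the header plus one u16
 * entry per reserved vector, i.e. (assuming struct ice_res_tracker,
 * defined in the ice headers, ends with a flexible u16 array):
 *
 *	size = sizeof(struct ice_res_tracker) + sizeof(u16) * vectors;
 *
 * Callers such as ice_vsi_release() later reclaim vector ranges from
 * this table via ice_free_res().
 */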

/**
 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
 * @pf: board private structure
 */
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		ice_dis_msix(pf);

	devm_kfree(&pf->pdev->dev, pf->irq_tracker);
	pf->irq_tracker = NULL;
}

/**
 * ice_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in ice_pci_tbl
 *
 * Returns 0 on success, negative on failure
 */
static int ice_probe(struct pci_dev *pdev,
		     const struct pci_device_id __always_unused *ent)
{
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;

	/* this driver uses devres, see Documentation/driver-model/devres.txt */
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
	if (err) {
		dev_err(&pdev->dev, "I/O map error %d\n", err);
		return err;
	}

	pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return -ENOMEM;

	/* set up for high or low DMA */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
		return err;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	pf->pdev = pdev;
	pci_set_drvdata(pdev, pf);
	set_bit(__ICE_DOWN, pf->state);

	hw = &pf->hw;
	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
	hw->back = pf;
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	ice_set_ctrlq_len(hw);

	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (debug < -1)
		hw->debug_mask = debug;
#endif

	err = ice_init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
		err = -EIO;
		goto err_exit_unroll;
	}

	dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		 hw->api_maj_ver, hw->api_min_ver);

	ice_init_pf(pf);

	ice_determine_q_usage(pf);

	pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
				  hw->func_caps.guaranteed_num_vsi);
	if (!pf->num_alloc_vsi) {
		err = -EIO;
		goto err_init_pf_unroll;
	}

	pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
			       sizeof(struct ice_vsi *), GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_init_pf_unroll;
	}

	err = ice_init_interrupt_scheme(pf);
	if (err) {
		dev_err(&pdev->dev,
			"ice_init_interrupt_scheme failed: %d\n", err);
		err = -EIO;
		goto err_init_interrupt_unroll;
	}

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		err = ice_req_irq_msix_misc(pf);
		if (err) {
			dev_err(&pdev->dev,
				"setup of misc vector failed: %d\n", err);
			goto err_init_interrupt_unroll;
		}
	}

	/* create switch struct for the switch element created by FW on boot */
	pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
				    GFP_KERNEL);
	if (!pf->first_sw) {
		err = -ENOMEM;
		goto err_msix_misc_unroll;
	}

	pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
	pf->first_sw->pf = pf;

	/* record the sw_id available for later use */
	pf->first_sw->sw_id = hw->port_info->sw_id;

	err = ice_setup_pf_sw(pf);
	if (err) {
		dev_err(&pdev->dev,
			"probe failed due to setup pf switch:%d\n", err);
		goto err_alloc_sw_unroll;
	}

	/* Driver is mostly up */
	clear_bit(__ICE_DOWN, pf->state);

	/* since everything is good, start the service timer */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;

err_alloc_sw_unroll:
	set_bit(__ICE_DOWN, pf->state);
	devm_kfree(&pf->pdev->dev, pf->first_sw);
err_msix_misc_unroll:
	ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
	ice_clear_interrupt_scheme(pf);
	devm_kfree(&pdev->dev, pf->vsi);
err_init_pf_unroll:
	ice_deinit_pf(pf);
	ice_deinit_hw(hw);
err_exit_unroll:
	pci_disable_pcie_error_reporting(pdev);
	return err;
}
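
/* The error labels in ice_probe() mirror the setup order in reverse: a
 * failure in ice_setup_pf_sw(), for example, jumps to err_alloc_sw_unroll,
 * which marks the PF down and frees first_sw, then falls through to free
 * the misc vector, tear down the interrupt scheme, free the VSI array,
 * and de-init the PF and HW before disabling PCIe error reporting.
 */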

/**
 * ice_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void ice_remove(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	int i = 0;
	int err;

	if (!pf)
		return;

	set_bit(__ICE_DOWN, pf->state);

	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;

		err = ice_vsi_release(pf->vsi[i]);
		if (err)
			dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n",
				i, err);
	}

	ice_free_irq_msix_misc(pf);
	ice_clear_interrupt_scheme(pf);
	ice_deinit_pf(pf);
	ice_deinit_hw(&pf->hw);
	pci_disable_pcie_error_reporting(pdev);
}

/* ice_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ice_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);

static struct pci_driver ice_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ice_pci_tbl,
	.probe = ice_probe,
	.remove = ice_remove,
};

/**
 * ice_module_init - Driver registration routine
 *
 * ice_module_init is the first routine called when the driver is
 * loaded. It creates the driver workqueue and registers the driver
 * with the PCI subsystem.
 */
static int __init ice_module_init(void)
{
	int status;

	pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
	pr_info("%s\n", ice_copyright);

	ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME);
	if (!ice_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	status = pci_register_driver(&ice_driver);
	if (status) {
		pr_err("failed to register pci driver, err %d\n", status);
		destroy_workqueue(ice_wq);
	}

	return status;
}
module_init(ice_module_init);
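
/* Once built (assuming the module is built as ice.ko, per KBUILD_MODNAME),
 * the driver is typically loaded and removed with the usual tools, e.g.:
 *
 *	# modprobe ice
 *	# rmmod ice
 *
 * Loading registers ice_driver with the PCI core, which then calls
 * ice_probe() for each device matching an entry in ice_pci_tbl.
 */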

/**
 * ice_module_exit - Driver exit cleanup routine
 *
 * ice_module_exit is called just before the driver is removed
 * from memory.
 */
static void __exit ice_module_exit(void)
{
	pci_unregister_driver(&ice_driver);
	destroy_workqueue(ice_wq);
	pr_info("module unloaded\n");
}
module_exit(ice_module_exit);

/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */
static int ice_vsi_release(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	if (!vsi->back)
		return -ENODEV;
	pf = vsi->back;

	if (vsi->netdev) {
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	/* reclaim interrupt vectors back to PF */
	ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
	pf->num_avail_msix += vsi->num_q_vectors;

	ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);
	ice_vsi_clear_rings(vsi);
	ice_vsi_put_qs(vsi);
	pf->q_left_tx += vsi->alloc_txq;
	pf->q_left_rx += vsi->alloc_rxq;
	ice_vsi_clear(vsi);

	return 0;
}