i40iw_main.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031
  1. /*******************************************************************************
  2. *
  3. * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenFabrics.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. *
  33. *******************************************************************************/
  34. #include <linux/module.h>
  35. #include <linux/moduleparam.h>
  36. #include <linux/netdevice.h>
  37. #include <linux/etherdevice.h>
  38. #include <linux/ip.h>
  39. #include <linux/tcp.h>
  40. #include <linux/if_vlan.h>
  41. #include <net/addrconf.h>
  42. #include "i40iw.h"
  43. #include "i40iw_register.h"
  44. #include <net/netevent.h>
  45. #define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
  46. #define CLIENT_IW_INTERFACE_VERSION_MINOR 01
  47. #define CLIENT_IW_INTERFACE_VERSION_BUILD 00
  48. #define DRV_VERSION_MAJOR 0
  49. #define DRV_VERSION_MINOR 5
  50. #define DRV_VERSION_BUILD 123
  51. #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
  52. __stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
  53. static int push_mode;
  54. module_param(push_mode, int, 0644);
  55. MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
  56. static int debug;
  57. module_param(debug, int, 0644);
  58. MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
  59. static int resource_profile;
  60. module_param(resource_profile, int, 0644);
  61. MODULE_PARM_DESC(resource_profile,
  62. "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");
  63. static int max_rdma_vfs = 32;
  64. module_param(max_rdma_vfs, int, 0644);
  65. MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default");
  66. static int mpa_version = 2;
  67. module_param(mpa_version, int, 0644);
  68. MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");
  69. MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
  70. MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
  71. MODULE_LICENSE("Dual BSD/GPL");
  72. MODULE_VERSION(DRV_VERSION);
  73. static struct i40e_client i40iw_client;
  74. static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
  75. static LIST_HEAD(i40iw_handlers);
  76. static spinlock_t i40iw_handler_lock;
  77. static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
  78. u32 vf_id, u8 *msg, u16 len);
  79. static struct notifier_block i40iw_inetaddr_notifier = {
  80. .notifier_call = i40iw_inetaddr_event
  81. };
  82. static struct notifier_block i40iw_inetaddr6_notifier = {
  83. .notifier_call = i40iw_inet6addr_event
  84. };
  85. static struct notifier_block i40iw_net_notifier = {
  86. .notifier_call = i40iw_net_event
  87. };
  88. static atomic_t i40iw_notifiers_registered;
  89. /**
  90. * i40iw_find_i40e_handler - find a handler given a client info
  91. * @ldev: pointer to a client info
  92. */
  93. static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
  94. {
  95. struct i40iw_handler *hdl;
  96. unsigned long flags;
  97. spin_lock_irqsave(&i40iw_handler_lock, flags);
  98. list_for_each_entry(hdl, &i40iw_handlers, list) {
  99. if (hdl->ldev.netdev == ldev->netdev) {
  100. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  101. return hdl;
  102. }
  103. }
  104. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  105. return NULL;
  106. }
  107. /**
  108. * i40iw_find_netdev - find a handler given a netdev
  109. * @netdev: pointer to net_device
  110. */
  111. struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
  112. {
  113. struct i40iw_handler *hdl;
  114. unsigned long flags;
  115. spin_lock_irqsave(&i40iw_handler_lock, flags);
  116. list_for_each_entry(hdl, &i40iw_handlers, list) {
  117. if (hdl->ldev.netdev == netdev) {
  118. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  119. return hdl;
  120. }
  121. }
  122. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  123. return NULL;
  124. }
  125. /**
  126. * i40iw_add_handler - add a handler to the list
  127. * @hdl: handler to be added to the handler list
  128. */
  129. static void i40iw_add_handler(struct i40iw_handler *hdl)
  130. {
  131. unsigned long flags;
  132. spin_lock_irqsave(&i40iw_handler_lock, flags);
  133. list_add(&hdl->list, &i40iw_handlers);
  134. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  135. }
  136. /**
  137. * i40iw_del_handler - delete a handler from the list
  138. * @hdl: handler to be deleted from the handler list
  139. */
  140. static int i40iw_del_handler(struct i40iw_handler *hdl)
  141. {
  142. unsigned long flags;
  143. spin_lock_irqsave(&i40iw_handler_lock, flags);
  144. list_del(&hdl->list);
  145. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  146. return 0;
  147. }
  148. /**
  149. * i40iw_enable_intr - set up device interrupts
  150. * @dev: hardware control device structure
  151. * @msix_id: id of the interrupt to be enabled
  152. */
  153. static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
  154. {
  155. u32 val;
  156. val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
  157. I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
  158. (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
  159. if (dev->is_pf)
  160. i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
  161. else
  162. i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
  163. }
/**
 * i40iw_dpc - tasklet for aeq and ceq 0
 * @data: iwarp device (struct i40iw_device, passed as tasklet data)
 *
 * Bottom half for msix vector 0. When the vector is shared, ceq 0 is
 * serviced here as well before the aeq; the vector is re-armed last.
 */
static void i40iw_dpc(unsigned long data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	/* shared vector: ceq 0 completions ride on the aeq interrupt */
	if (iwdev->msix_shared)
		i40iw_process_ceq(iwdev, iwdev->ceqlist);
	i40iw_process_aeq(iwdev);
	/* re-enable msix vector 0 only after both queues are drained */
	i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
}
/**
 * i40iw_ceq_dpc - dpc handler for CEQ
 * @data: data points to CEQ (struct i40iw_ceq, passed as tasklet data)
 *
 * Bottom half for a dedicated ceq vector: drain the ceq, then re-arm
 * its msix interrupt.
 */
static void i40iw_ceq_dpc(unsigned long data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
	struct i40iw_device *iwdev = iwceq->iwdev;

	i40iw_process_ceq(iwdev, iwceq);
	i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
}
  187. /**
  188. * i40iw_irq_handler - interrupt handler for aeq and ceq0
  189. * @irq: Interrupt request number
  190. * @data: iwarp device
  191. */
  192. static irqreturn_t i40iw_irq_handler(int irq, void *data)
  193. {
  194. struct i40iw_device *iwdev = (struct i40iw_device *)data;
  195. tasklet_schedule(&iwdev->dpc_tasklet);
  196. return IRQ_HANDLED;
  197. }
  198. /**
  199. * i40iw_destroy_cqp - destroy control qp
  200. * @iwdev: iwarp device
  201. * @create_done: 1 if cqp create poll was success
  202. *
  203. * Issue destroy cqp request and
  204. * free the resources associated with the cqp
  205. */
  206. static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
  207. {
  208. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  209. struct i40iw_cqp *cqp = &iwdev->cqp;
  210. if (free_hwcqp)
  211. dev->cqp_ops->cqp_destroy(dev->cqp);
  212. i40iw_free_dma_mem(dev->hw, &cqp->sq);
  213. kfree(cqp->scratch_array);
  214. iwdev->cqp.scratch_array = NULL;
  215. kfree(cqp->cqp_requests);
  216. cqp->cqp_requests = NULL;
  217. }
/**
 * i40iw_disable_irq - disable device interrupts
 * @dev: hardware control device structure
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
			      struct i40iw_msix_vector *msix_vec,
			      void *dev_id)
{
	/* msix vector idx N maps to DYN_CTLN register index N - 1 */
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	/* clear the affinity hint before handing the irq back */
	irq_set_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}
  237. /**
  238. * i40iw_destroy_aeq - destroy aeq
  239. * @iwdev: iwarp device
  240. * @reset: true if called before reset
  241. *
  242. * Issue a destroy aeq request and
  243. * free the resources associated with the aeq
  244. * The function is called during driver unload
  245. */
  246. static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
  247. {
  248. enum i40iw_status_code status = I40IW_ERR_NOT_READY;
  249. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  250. struct i40iw_aeq *aeq = &iwdev->aeq;
  251. if (!iwdev->msix_shared)
  252. i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
  253. if (reset)
  254. goto exit;
  255. if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
  256. status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
  257. if (status)
  258. i40iw_pr_err("destroy aeq failed %d\n", status);
  259. exit:
  260. i40iw_free_dma_mem(dev->hw, &aeq->mem);
  261. }
  262. /**
  263. * i40iw_destroy_ceq - destroy ceq
  264. * @iwdev: iwarp device
  265. * @iwceq: ceq to be destroyed
  266. * @reset: true if called before reset
  267. *
  268. * Issue a destroy ceq request and
  269. * free the resources associated with the ceq
  270. */
  271. static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
  272. struct i40iw_ceq *iwceq,
  273. bool reset)
  274. {
  275. enum i40iw_status_code status;
  276. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  277. if (reset)
  278. goto exit;
  279. status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
  280. if (status) {
  281. i40iw_pr_err("ceq destroy command failed %d\n", status);
  282. goto exit;
  283. }
  284. status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
  285. if (status)
  286. i40iw_pr_err("ceq destroy completion failed %d\n", status);
  287. exit:
  288. i40iw_free_dma_mem(dev->hw, &iwceq->mem);
  289. }
/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 * @reset: true if called before reset
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

	/* When the aeq shares msix vector 0 with ceq 0, that irq was
	 * requested with the iwarp device as dev_id (see
	 * i40iw_configure_ceq_vector), so ceq 0 is torn down separately
	 * with the matching dev_id.
	 */
	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq, reset);
		iwceq++;
		i++;
	}
	/* Remaining ceqs were requested with the ceq itself as dev_id.
	 * The msix_vec++ in the loop init skips vector 0 (aeq / shared).
	 */
	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq, reset);
	}
}
  315. /**
  316. * i40iw_destroy_ccq - destroy control cq
  317. * @iwdev: iwarp device
  318. * @reset: true if called before reset
  319. *
  320. * Issue destroy ccq request and
  321. * free the resources associated with the ccq
  322. */
  323. static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
  324. {
  325. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  326. struct i40iw_ccq *ccq = &iwdev->ccq;
  327. enum i40iw_status_code status = 0;
  328. if (!reset)
  329. status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
  330. if (status)
  331. i40iw_pr_err("ccq destroy failed %d\n", status);
  332. i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
  333. }
/* types of hmc objects managed as a group; i40iw_create_hmc_objs creates
 * them in this order and i40iw_create_hmc_objs's unwind path destroys them
 * in reverse order on failure.
 */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};
/**
 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info structure holding the per-type object counts
 * @is_pf: true if the function is PF otherwise false
 * @reset: true if called before reset
 *
 * (Kernel-doc fixed: first parameter is @dev, not @iwdev, and @hmc_info
 * was previously undocumented.)
 */
static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
					 enum i40iw_hmc_rsrc_type obj_type,
					 struct i40iw_hmc_info *hmc_info,
					 bool is_pf,
					 bool reset)
{
	struct i40iw_hmc_del_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	/* delete every object of this type that the hmc table accounts for */
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.is_pf = is_pf;
	if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
		i40iw_pr_err("del obj of type %d failed\n", obj_type);
}
  370. /**
  371. * i40iw_del_hmc_objects - remove all device hmc objects
  372. * @dev: iwarp device
  373. * @hmc_info: hmc_info to free
  374. * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
  375. * by PF on behalf of VF
  376. * @reset: true if called before reset
  377. */
  378. static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
  379. struct i40iw_hmc_info *hmc_info,
  380. bool is_pf,
  381. bool reset)
  382. {
  383. unsigned int i;
  384. for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
  385. i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
  386. }
  387. /**
  388. * i40iw_ceq_handler - interrupt handler for ceq
  389. * @data: ceq pointer
  390. */
  391. static irqreturn_t i40iw_ceq_handler(int irq, void *data)
  392. {
  393. struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
  394. if (iwceq->irq != irq)
  395. i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
  396. tasklet_schedule(&iwceq->dpc_tasklet);
  397. return IRQ_HANDLED;
  398. }
/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 *
 * Thin wrapper over the device's hmc_ops vtable; returns whatever
 * create_hmc_object reports.
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}
/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: true if the function is PF otherwise false
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
						    bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	/* Create each type in iw_hmc_obj_types order; on the first failure
	 * break with i left at the failed index for the unwind below.
	 */
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_pr_err("create obj type %d status = %d\n",
				     iw_hmc_obj_types[i], status);
			break;
		}
	}
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
								      dev->hmc_fn_id,
								      true, true));
	/* unwind: destroy the types [0, i) created before the failure,
	 * in reverse creation order
	 */
	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
					     iw_hmc_obj_types[i],
					     dev->hmc_info,
					     is_pf,
					     false);
	}
	return status;
}
  453. /**
  454. * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
  455. * @iwdev: iwarp device
  456. * @memptr: points to the memory addresses
  457. * @size: size of memory needed
  458. * @mask: mask for the aligned memory
  459. *
  460. * Get aligned memory of the requested size and
  461. * update the memptr to point to the new aligned memory
  462. * Return 0 if successful, otherwise return no memory error
  463. */
  464. enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
  465. struct i40iw_dma_mem *memptr,
  466. u32 size,
  467. u32 mask)
  468. {
  469. unsigned long va, newva;
  470. unsigned long extra;
  471. va = (unsigned long)iwdev->obj_next.va;
  472. newva = va;
  473. if (mask)
  474. newva = ALIGN(va, (mask + 1));
  475. extra = newva - va;
  476. memptr->va = (u8 *)va + extra;
  477. memptr->pa = iwdev->obj_next.pa + extra;
  478. memptr->size = size;
  479. if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
  480. return I40IW_ERR_NO_MEMORY;
  481. iwdev->obj_next.va = memptr->va + size;
  482. iwdev->obj_next.pa = memptr->pa + size;
  483. return 0;
  484. }
  485. /**
  486. * i40iw_create_cqp - create control qp
  487. * @iwdev: iwarp device
  488. *
  489. * Return 0, if the cqp and all the resources associated with it
  490. * are successfully created, otherwise return error
  491. */
  492. static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
  493. {
  494. enum i40iw_status_code status;
  495. u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
  496. struct i40iw_dma_mem mem;
  497. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  498. struct i40iw_cqp_init_info cqp_init_info;
  499. struct i40iw_cqp *cqp = &iwdev->cqp;
  500. u16 maj_err, min_err;
  501. int i;
  502. cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
  503. if (!cqp->cqp_requests)
  504. return I40IW_ERR_NO_MEMORY;
  505. cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
  506. if (!cqp->scratch_array) {
  507. kfree(cqp->cqp_requests);
  508. return I40IW_ERR_NO_MEMORY;
  509. }
  510. dev->cqp = &cqp->sc_cqp;
  511. dev->cqp->dev = dev;
  512. memset(&cqp_init_info, 0, sizeof(cqp_init_info));
  513. status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
  514. (sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
  515. I40IW_CQP_ALIGNMENT);
  516. if (status)
  517. goto exit;
  518. status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
  519. I40IW_HOST_CTX_ALIGNMENT_MASK);
  520. if (status)
  521. goto exit;
  522. dev->cqp->host_ctx_pa = mem.pa;
  523. dev->cqp->host_ctx = mem.va;
  524. /* populate the cqp init info */
  525. cqp_init_info.dev = dev;
  526. cqp_init_info.sq_size = sqsize;
  527. cqp_init_info.sq = cqp->sq.va;
  528. cqp_init_info.sq_pa = cqp->sq.pa;
  529. cqp_init_info.host_ctx_pa = mem.pa;
  530. cqp_init_info.host_ctx = mem.va;
  531. cqp_init_info.hmc_profile = iwdev->resource_profile;
  532. cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
  533. cqp_init_info.scratch_array = cqp->scratch_array;
  534. status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
  535. if (status) {
  536. i40iw_pr_err("cqp init status %d\n", status);
  537. goto exit;
  538. }
  539. status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
  540. if (status) {
  541. i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
  542. status, maj_err, min_err);
  543. goto exit;
  544. }
  545. spin_lock_init(&cqp->req_lock);
  546. INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
  547. INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
  548. /* init the waitq of the cqp_requests and add them to the list */
  549. for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) {
  550. init_waitqueue_head(&cqp->cqp_requests[i].waitq);
  551. list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
  552. }
  553. return 0;
  554. exit:
  555. /* clean up the created resources */
  556. i40iw_destroy_cqp(iwdev, false);
  557. return status;
  558. }
  559. /**
  560. * i40iw_create_ccq - create control cq
  561. * @iwdev: iwarp device
  562. *
  563. * Return 0, if the ccq and the resources associated with it
  564. * are successfully created, otherwise return error
  565. */
  566. static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
  567. {
  568. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  569. struct i40iw_dma_mem mem;
  570. enum i40iw_status_code status;
  571. struct i40iw_ccq_init_info info;
  572. struct i40iw_ccq *ccq = &iwdev->ccq;
  573. memset(&info, 0, sizeof(info));
  574. dev->ccq = &ccq->sc_cq;
  575. dev->ccq->dev = dev;
  576. info.dev = dev;
  577. ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
  578. ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
  579. status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
  580. ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
  581. if (status)
  582. goto exit;
  583. status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
  584. I40IW_SHADOWAREA_MASK);
  585. if (status)
  586. goto exit;
  587. ccq->sc_cq.back_cq = (void *)ccq;
  588. /* populate the ccq init info */
  589. info.cq_base = ccq->mem_cq.va;
  590. info.cq_pa = ccq->mem_cq.pa;
  591. info.num_elem = IW_CCQ_SIZE;
  592. info.shadow_area = mem.va;
  593. info.shadow_area_pa = mem.pa;
  594. info.ceqe_mask = false;
  595. info.ceq_id_valid = true;
  596. info.shadow_read_threshold = 16;
  597. status = dev->ccq_ops->ccq_init(dev->ccq, &info);
  598. if (!status)
  599. status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
  600. exit:
  601. if (status)
  602. i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
  603. return status;
  604. }
  605. /**
  606. * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
  607. * @iwdev: iwarp device
  608. * @msix_vec: interrupt vector information
  609. * @iwceq: ceq associated with the vector
  610. * @ceq_id: the id number of the iwceq
  611. *
  612. * Allocate interrupt resources and enable irq handling
  613. * Return 0 if successful, otherwise return error
  614. */
  615. static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
  616. struct i40iw_ceq *iwceq,
  617. u32 ceq_id,
  618. struct i40iw_msix_vector *msix_vec)
  619. {
  620. enum i40iw_status_code status;
  621. cpumask_t mask;
  622. if (iwdev->msix_shared && !ceq_id) {
  623. tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
  624. status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
  625. } else {
  626. tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
  627. status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
  628. }
  629. cpumask_clear(&mask);
  630. cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
  631. irq_set_affinity_hint(msix_vec->irq, &mask);
  632. if (status) {
  633. i40iw_pr_err("ceq irq config fail\n");
  634. return I40IW_ERR_CONFIG;
  635. }
  636. msix_vec->ceq_id = ceq_id;
  637. return 0;
  638. }
  639. /**
  640. * i40iw_create_ceq - create completion event queue
  641. * @iwdev: iwarp device
  642. * @iwceq: pointer to the ceq resources to be created
  643. * @ceq_id: the id number of the iwceq
  644. *
  645. * Return 0, if the ceq and the resources associated with it
  646. * are successfully created, otherwise return error
  647. */
  648. static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
  649. struct i40iw_ceq *iwceq,
  650. u32 ceq_id)
  651. {
  652. enum i40iw_status_code status;
  653. struct i40iw_ceq_init_info info;
  654. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  655. u64 scratch;
  656. memset(&info, 0, sizeof(info));
  657. info.ceq_id = ceq_id;
  658. iwceq->iwdev = iwdev;
  659. iwceq->mem.size = sizeof(struct i40iw_ceqe) *
  660. iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  661. status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
  662. I40IW_CEQ_ALIGNMENT);
  663. if (status)
  664. goto exit;
  665. info.ceq_id = ceq_id;
  666. info.ceqe_base = iwceq->mem.va;
  667. info.ceqe_pa = iwceq->mem.pa;
  668. info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  669. iwceq->sc_ceq.ceq_id = ceq_id;
  670. info.dev = dev;
  671. scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
  672. status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
  673. if (!status)
  674. status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
  675. exit:
  676. if (status)
  677. i40iw_free_dma_mem(dev->hw, &iwceq->mem);
  678. return status;
  679. }
  680. void i40iw_request_reset(struct i40iw_device *iwdev)
  681. {
  682. struct i40e_info *ldev = iwdev->ldev;
  683. ldev->ops->request_reset(ldev, iwdev->client, 1);
  684. }
/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	/* Hand the qvlist built in i40iw_save_msix_info() to the LAN driver */
	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	/* one CEQ per usable msix vector, capped by what the HMC allows */
	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
	/*
	 * i indexes msix vectors, ceq_id indexes CEQs.  Vector 0 belongs to
	 * the AEQ; it also serves CEQ 0 when msix_shared is set, otherwise
	 * CEQ 0 starts at vector 1.
	 */
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}
		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq, false);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}
exit:
	/* Partial success is success: keep whatever CEQs did come up */
	if (status) {
		if (!iwdev->ceqs_count) {
			kfree(iwdev->ceqlist);
			iwdev->ceqlist = NULL;
		} else {
			status = 0;
		}
	}
	return status;
}
  748. /**
  749. * i40iw_configure_aeq_vector - set up the msix vector for aeq
  750. * @iwdev: iwarp device
  751. *
  752. * Allocate interrupt resources and enable irq handling
  753. * Return 0 if successful, otherwise return error
  754. */
  755. static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
  756. {
  757. struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
  758. u32 ret = 0;
  759. if (!iwdev->msix_shared) {
  760. tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
  761. ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
  762. }
  763. if (ret) {
  764. i40iw_pr_err("aeq irq config fail\n");
  765. return I40IW_ERR_CONFIG;
  766. }
  767. return 0;
  768. }
  769. /**
  770. * i40iw_create_aeq - create async event queue
  771. * @iwdev: iwarp device
  772. *
  773. * Return 0, if the aeq and the resources associated with it
  774. * are successfully created, otherwise return error
  775. */
  776. static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
  777. {
  778. enum i40iw_status_code status;
  779. struct i40iw_aeq_init_info info;
  780. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  781. struct i40iw_aeq *aeq = &iwdev->aeq;
  782. u64 scratch = 0;
  783. u32 aeq_size;
  784. aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
  785. iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  786. memset(&info, 0, sizeof(info));
  787. aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
  788. status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
  789. I40IW_AEQ_ALIGNMENT);
  790. if (status)
  791. goto exit;
  792. info.aeqe_base = aeq->mem.va;
  793. info.aeq_elem_pa = aeq->mem.pa;
  794. info.elem_cnt = aeq_size;
  795. info.dev = dev;
  796. status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
  797. if (status)
  798. goto exit;
  799. status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
  800. if (!status)
  801. status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
  802. exit:
  803. if (status)
  804. i40iw_free_dma_mem(dev->hw, &aeq->mem);
  805. return status;
  806. }
  807. /**
  808. * i40iw_setup_aeq - set up the device aeq
  809. * @iwdev: iwarp device
  810. *
  811. * Create the aeq and configure its msix interrupt vector
  812. * Return 0 if successful, otherwise return error
  813. */
  814. static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
  815. {
  816. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  817. enum i40iw_status_code status;
  818. status = i40iw_create_aeq(iwdev);
  819. if (status)
  820. return status;
  821. status = i40iw_configure_aeq_vector(iwdev);
  822. if (status) {
  823. i40iw_destroy_aeq(iwdev, false);
  824. return status;
  825. }
  826. if (!iwdev->msix_shared)
  827. i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
  828. return 0;
  829. }
  830. /**
  831. * i40iw_initialize_ilq - create iwarp local queue for cm
  832. * @iwdev: iwarp device
  833. *
  834. * Return 0 if successful, otherwise return error
  835. */
  836. static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
  837. {
  838. struct i40iw_puda_rsrc_info info;
  839. enum i40iw_status_code status;
  840. memset(&info, 0, sizeof(info));
  841. info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
  842. info.cq_id = 1;
  843. info.qp_id = 0;
  844. info.count = 1;
  845. info.pd_id = 1;
  846. info.sq_size = 8192;
  847. info.rq_size = 8192;
  848. info.buf_size = 1024;
  849. info.tx_buf_cnt = 16384;
  850. info.receive = i40iw_receive_ilq;
  851. info.xmit_complete = i40iw_free_sqbuf;
  852. status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
  853. if (status)
  854. i40iw_pr_err("ilq create fail\n");
  855. return status;
  856. }
  857. /**
  858. * i40iw_initialize_ieq - create iwarp exception queue
  859. * @iwdev: iwarp device
  860. *
  861. * Return 0 if successful, otherwise return error
  862. */
  863. static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
  864. {
  865. struct i40iw_puda_rsrc_info info;
  866. enum i40iw_status_code status;
  867. memset(&info, 0, sizeof(info));
  868. info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
  869. info.cq_id = 2;
  870. info.qp_id = iwdev->sc_dev.exception_lan_queue;
  871. info.count = 1;
  872. info.pd_id = 2;
  873. info.sq_size = 8192;
  874. info.rq_size = 8192;
  875. info.buf_size = 2048;
  876. info.tx_buf_cnt = 16384;
  877. status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
  878. if (status)
  879. i40iw_pr_err("ieq create fail\n");
  880. return status;
  881. }
  882. /**
  883. * i40iw_hmc_setup - create hmc objects for the device
  884. * @iwdev: iwarp device
  885. *
  886. * Set up the device private memory space for the number and size of
  887. * the hmc objects and create the objects
  888. * Return 0 if successful, otherwise return error
  889. */
  890. static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
  891. {
  892. enum i40iw_status_code status;
  893. iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
  894. status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
  895. if (status)
  896. goto exit;
  897. status = i40iw_create_hmc_objs(iwdev, true);
  898. if (status)
  899. goto exit;
  900. iwdev->init_state = HMC_OBJS_CREATED;
  901. exit:
  902. return status;
  903. }
/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 *
 * Frees everything allocated during early init: the obj_mem DMA area,
 * the HMC sd-entry table, the mem_resources buffer, the ceq list, the
 * msix table and the combined hmc_info/pble allocation.  Each pointer
 * is NULLed afterwards so a repeated teardown pass is harmless.
 */
static void i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}
  923. /**
  924. * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
  925. * @iwdev: iwarp device
  926. * @idx: the index of the mac ip address to delete
  927. */
  928. static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
  929. {
  930. struct i40iw_cqp *iwcqp = &iwdev->cqp;
  931. struct i40iw_cqp_request *cqp_request;
  932. struct cqp_commands_info *cqp_info;
  933. enum i40iw_status_code status = 0;
  934. cqp_request = i40iw_get_cqp_request(iwcqp, true);
  935. if (!cqp_request) {
  936. i40iw_pr_err("cqp_request memory failed\n");
  937. return;
  938. }
  939. cqp_info = &cqp_request->info;
  940. cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
  941. cqp_info->post_sq = 1;
  942. cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
  943. cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  944. cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
  945. cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
  946. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  947. if (status)
  948. i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
  949. }
  950. /**
  951. * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
  952. * @iwdev: iwarp device
  953. * @mac_addr: pointer to mac address
  954. * @idx: the index of the mac ip address to add
  955. */
  956. static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
  957. u8 *mac_addr,
  958. u8 idx)
  959. {
  960. struct i40iw_local_mac_ipaddr_entry_info *info;
  961. struct i40iw_cqp *iwcqp = &iwdev->cqp;
  962. struct i40iw_cqp_request *cqp_request;
  963. struct cqp_commands_info *cqp_info;
  964. enum i40iw_status_code status = 0;
  965. cqp_request = i40iw_get_cqp_request(iwcqp, true);
  966. if (!cqp_request) {
  967. i40iw_pr_err("cqp_request memory failed\n");
  968. return I40IW_ERR_NO_MEMORY;
  969. }
  970. cqp_info = &cqp_request->info;
  971. cqp_info->post_sq = 1;
  972. info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
  973. ether_addr_copy(info->mac_addr, mac_addr);
  974. info->entry_idx = idx;
  975. cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  976. cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
  977. cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
  978. cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  979. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  980. if (status)
  981. i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
  982. return status;
  983. }
/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}
	/* increment refcount, because we need the cqp request ret value */
	atomic_inc(&cqp_request->refcount);
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	/* pass the request as scratch — presumably identifies it on completion */
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		/* the allocated table index comes back in op_ret_val */
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}
  1021. /**
  1022. * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
  1023. * @iwdev: iwarp device
  1024. * @macaddr: pointer to mac address
  1025. *
  1026. * Allocate a mac ip address entry and add it to the hw table
  1027. * Return 0 if successful, otherwise return error
  1028. */
  1029. static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
  1030. u8 *macaddr)
  1031. {
  1032. enum i40iw_status_code status;
  1033. status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
  1034. if (!status) {
  1035. status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
  1036. (u8)iwdev->mac_ip_table_idx);
  1037. if (status)
  1038. i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
  1039. }
  1040. return status;
  1041. }
/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 *
 * Walks all UP netdevs that are our own netdev or a VLAN stacked on it,
 * and pushes each of their IPv6 addresses into the HW ARP cache.
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		/* our netdev, or a vlan whose real dev is ours, and it is UP */
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		    (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				/* convert the address words to host byte order */
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}
  1077. /**
  1078. * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
  1079. * @iwdev: iwarp device
  1080. */
  1081. static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
  1082. {
  1083. struct net_device *dev;
  1084. struct in_device *idev;
  1085. bool got_lock = true;
  1086. u32 ip_addr;
  1087. if (!rtnl_trylock())
  1088. got_lock = false;
  1089. for_each_netdev(&init_net, dev) {
  1090. if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
  1091. (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
  1092. (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
  1093. idev = in_dev_get(dev);
  1094. for_ifa(idev) {
  1095. i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
  1096. "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
  1097. rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
  1098. ip_addr = ntohl(ifa->ifa_address);
  1099. i40iw_manage_arp_cache(iwdev,
  1100. dev->dev_addr,
  1101. &ip_addr,
  1102. true,
  1103. I40IW_ARP_ADD);
  1104. }
  1105. endfor_ifa(idev);
  1106. in_dev_put(idev);
  1107. }
  1108. }
  1109. if (got_lock)
  1110. rtnl_unlock();
  1111. }
  1112. /**
  1113. * i40iw_add_mac_ip - add mac and ip addresses
  1114. * @iwdev: iwarp device
  1115. *
  1116. * Create and add a mac ip address entry to the hw table and
  1117. * ipv4/ipv6 addresses to the arp cache
  1118. * Return 0 if successful, otherwise return error
  1119. */
  1120. static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
  1121. {
  1122. struct net_device *netdev = iwdev->netdev;
  1123. enum i40iw_status_code status;
  1124. status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
  1125. if (status)
  1126. return status;
  1127. i40iw_add_ipv4_addr(iwdev);
  1128. i40iw_add_ipv6_addr(iwdev);
  1129. return 0;
  1130. }
/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 *
 * Polls the firmware load status and the three PE CPU status registers
 * about once a second, up to 14 tries, until every CPU reports 0x80.
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		/* all three PE CPUs at 0x80 means ready */
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break; /* SUCCESS */
		/* mdelay busy-waits a full second per retry */
		mdelay(1000);
		retrycount++;
	} while (retrycount < 14);
	/*
	 * NOTE(review): raw register write with a magic address/value pair
	 * (0xb4040 / 0x4C104C5); meaning is undocumented here — confirm
	 * against the hardware datasheet before changing.
	 */
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}
  1160. /**
  1161. * i40iw_initialize_dev - initialize device
  1162. * @iwdev: iwarp device
  1163. * @ldev: lan device information
  1164. *
  1165. * Allocate memory for the hmc objects and initialize iwdev
  1166. * Return 0 if successful, otherwise clean up the resources
  1167. * and return error
  1168. */
  1169. static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
  1170. struct i40e_info *ldev)
  1171. {
  1172. enum i40iw_status_code status;
  1173. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  1174. struct i40iw_device_init_info info;
  1175. struct i40iw_vsi_init_info vsi_info;
  1176. struct i40iw_dma_mem mem;
  1177. struct i40iw_l2params l2params;
  1178. u32 size;
  1179. struct i40iw_vsi_stats_info stats_info;
  1180. u16 last_qset = I40IW_NO_QSET;
  1181. u16 qset;
  1182. u32 i;
  1183. memset(&l2params, 0, sizeof(l2params));
  1184. memset(&info, 0, sizeof(info));
  1185. size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
  1186. (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
  1187. iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
  1188. if (!iwdev->hmc_info_mem)
  1189. return I40IW_ERR_NO_MEMORY;
  1190. iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
  1191. dev->hmc_info = &iwdev->hw.hmc;
  1192. dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
  1193. status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
  1194. I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
  1195. if (status)
  1196. goto exit;
  1197. info.fpm_query_buf_pa = mem.pa;
  1198. info.fpm_query_buf = mem.va;
  1199. status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
  1200. I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
  1201. if (status)
  1202. goto exit;
  1203. info.fpm_commit_buf_pa = mem.pa;
  1204. info.fpm_commit_buf = mem.va;
  1205. info.hmc_fn_id = ldev->fid;
  1206. info.is_pf = (ldev->ftype) ? false : true;
  1207. info.bar0 = ldev->hw_addr;
  1208. info.hw = &iwdev->hw;
  1209. info.debug_mask = debug;
  1210. l2params.mss =
  1211. (ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
  1212. for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
  1213. qset = ldev->params.qos.prio_qos[i].qs_handle;
  1214. l2params.qs_handle_list[i] = qset;
  1215. if (last_qset == I40IW_NO_QSET)
  1216. last_qset = qset;
  1217. else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
  1218. iwdev->dcb = true;
  1219. }
  1220. i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
  1221. info.exception_lan_queue = 1;
  1222. info.vchnl_send = i40iw_virtchnl_send;
  1223. status = i40iw_device_init(&iwdev->sc_dev, &info);
  1224. exit:
  1225. if (status) {
  1226. kfree(iwdev->hmc_info_mem);
  1227. iwdev->hmc_info_mem = NULL;
  1228. }
  1229. memset(&vsi_info, 0, sizeof(vsi_info));
  1230. vsi_info.dev = &iwdev->sc_dev;
  1231. vsi_info.back_vsi = (void *)iwdev;
  1232. vsi_info.params = &l2params;
  1233. i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
  1234. if (dev->is_pf) {
  1235. memset(&stats_info, 0, sizeof(stats_info));
  1236. stats_info.fcn_id = ldev->fid;
  1237. stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
  1238. stats_info.stats_initialize = true;
  1239. if (stats_info.pestat)
  1240. i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
  1241. }
  1242. return status;
  1243. }
  1244. /**
  1245. * i40iw_register_notifiers - register tcp ip notifiers
  1246. */
  1247. static void i40iw_register_notifiers(void)
  1248. {
  1249. if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
  1250. register_inetaddr_notifier(&i40iw_inetaddr_notifier);
  1251. register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
  1252. register_netevent_notifier(&i40iw_net_notifier);
  1253. }
  1254. }
  1255. /**
  1256. * i40iw_save_msix_info - copy msix vector information to iwarp device
  1257. * @iwdev: iwarp device
  1258. * @ldev: lan device information
  1259. *
  1260. * Allocate iwdev msix table and copy the ldev msix info to the table
  1261. * Return 0 if successful, otherwise return error
  1262. */
  1263. static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
  1264. struct i40e_info *ldev)
  1265. {
  1266. struct i40e_qvlist_info *iw_qvlist;
  1267. struct i40e_qv_info *iw_qvinfo;
  1268. u32 ceq_idx;
  1269. u32 i;
  1270. u32 size;
  1271. iwdev->msix_count = ldev->msix_count;
  1272. size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
  1273. size += sizeof(struct i40e_qvlist_info);
  1274. size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;
  1275. iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
  1276. if (!iwdev->iw_msixtbl)
  1277. return I40IW_ERR_NO_MEMORY;
  1278. iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
  1279. iw_qvlist = iwdev->iw_qvlist;
  1280. iw_qvinfo = iw_qvlist->qv_info;
  1281. iw_qvlist->num_vectors = iwdev->msix_count;
  1282. if (iwdev->msix_count <= num_online_cpus())
  1283. iwdev->msix_shared = true;
  1284. for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
  1285. iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
  1286. iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
  1287. iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
  1288. if (i == 0) {
  1289. iw_qvinfo->aeq_idx = 0;
  1290. if (iwdev->msix_shared)
  1291. iw_qvinfo->ceq_idx = ceq_idx++;
  1292. else
  1293. iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
  1294. } else {
  1295. iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
  1296. iw_qvinfo->ceq_idx = ceq_idx++;
  1297. }
  1298. iw_qvinfo->itr_idx = 3;
  1299. iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
  1300. }
  1301. return 0;
  1302. }
/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 * @reset: true if called before reset
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 *
 * Teardown is driven by init_state: each case undoes one init step and
 * deliberately falls through to the next-earlier state's cleanup.
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
{
	struct i40e_info *ldev = iwdev->ldev;

	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);
	if (iwdev->param_wq)
		destroy_workqueue(iwdev->param_wq);

	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		/* fallthrough */
	case IP_ADDR_REGISTERED:
		/* skip the HW table removal when a reset wipes it anyway */
		if (!reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		/* fallthrough */
	case INET_NOTIFIER:
		/* notifiers are shared; only the last instance unregisters */
		if (!atomic_dec_return(&i40iw_notifiers_registered)) {
			unregister_netevent_notifier(&i40iw_net_notifier);
			unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
			unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
		}
		/* fallthrough */
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev, reset);
		/* fallthrough */
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev, reset);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev, reset);
		/* fallthrough */
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		/* fallthrough */
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, true);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (iwdev->vsi.pestat) {
			i40iw_vsi_stats_free(&iwdev->vsi);
			kfree(iwdev->vsi.pestat);
		}
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
		/* fallthrough */
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}
/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	/* keep a private copy of the lan device info */
	memcpy(&hdl->ldev, ldev, sizeof(*ldev));
	/* profile 1 is remapped to 2 — rationale not visible here */
	if (resource_profile == 1)
		resource_profile = 2;

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
	    (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
	    I40IW_HMC_PROFILE_DEFAULT;
	/* rdma VFs are only enabled outside the default profile */
	iwdev->max_rdma_vfs =
		(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
	/* ftype presumably distinguishes PF (0) from VF — VF uses its own
	 * doorbell offset; confirm against the i40e client interface.
	 */
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		goto exit;
	iwdev->hw.dev_context = (void *)ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;
	iwdev->push_mode = push_mode;

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);
	init_waitqueue_head(&iwdev->close_wq);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	if (status) {
		/*
		 * NOTE(review): on the earliest failure obj_mem was never
		 * allocated; presumably i40iw_free_dma_mem tolerates the
		 * zeroed descriptor (iwdev comes from kzalloc) — confirm.
		 */
		kfree(iwdev->iw_msixtbl);
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}
/**
 * i40iw_get_used_rsrc - determine resources used internally
 * @iwdev: iwarp device
 *
 * Called after internal allocations.  Each "used" count is the position
 * of the first clear bit in the corresponding allocation bitmap, which
 * assumes internal allocations occupy a contiguous range from bit 0.
 */
static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}
  1445. /**
  1446. * i40iw_open - client interface operation open for iwarp/uda device
  1447. * @ldev: lan device information
  1448. * @client: iwarp client information, provided during registration
  1449. *
  1450. * Called by the lan driver during the processing of client register
  1451. * Create device resources, set up queues, pble and hmc objects and
  1452. * register the device with the ib verbs interface
  1453. * Return 0 if successful, otherwise return error
  1454. */
  1455. static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
  1456. {
  1457. struct i40iw_device *iwdev;
  1458. struct i40iw_sc_dev *dev;
  1459. enum i40iw_status_code status;
  1460. struct i40iw_handler *hdl;
  1461. hdl = i40iw_find_netdev(ldev->netdev);
  1462. if (hdl)
  1463. return 0;
  1464. hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
  1465. if (!hdl)
  1466. return -ENOMEM;
  1467. iwdev = &hdl->device;
  1468. iwdev->hdl = hdl;
  1469. dev = &iwdev->sc_dev;
  1470. i40iw_setup_cm_core(iwdev);
  1471. dev->back_dev = (void *)iwdev;
  1472. iwdev->ldev = &hdl->ldev;
  1473. iwdev->client = client;
  1474. mutex_init(&iwdev->pbl_mutex);
  1475. i40iw_add_handler(hdl);
  1476. do {
  1477. status = i40iw_setup_init_state(hdl, ldev, client);
  1478. if (status)
  1479. break;
  1480. iwdev->init_state = INITIAL_STATE;
  1481. if (dev->is_pf)
  1482. i40iw_wait_pe_ready(dev->hw);
  1483. status = i40iw_create_cqp(iwdev);
  1484. if (status)
  1485. break;
  1486. iwdev->init_state = CQP_CREATED;
  1487. status = i40iw_hmc_setup(iwdev);
  1488. if (status)
  1489. break;
  1490. status = i40iw_create_ccq(iwdev);
  1491. if (status)
  1492. break;
  1493. iwdev->init_state = CCQ_CREATED;
  1494. status = i40iw_initialize_ilq(iwdev);
  1495. if (status)
  1496. break;
  1497. iwdev->init_state = ILQ_CREATED;
  1498. status = i40iw_initialize_ieq(iwdev);
  1499. if (status)
  1500. break;
  1501. iwdev->init_state = IEQ_CREATED;
  1502. status = i40iw_setup_aeq(iwdev);
  1503. if (status)
  1504. break;
  1505. iwdev->init_state = AEQ_CREATED;
  1506. status = i40iw_setup_ceqs(iwdev, ldev);
  1507. if (status)
  1508. break;
  1509. iwdev->init_state = CEQ_CREATED;
  1510. status = i40iw_initialize_hw_resources(iwdev);
  1511. if (status)
  1512. break;
  1513. i40iw_get_used_rsrc(iwdev);
  1514. dev->ccq_ops->ccq_arm(dev->ccq);
  1515. status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
  1516. if (status)
  1517. break;
  1518. iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
  1519. i40iw_register_notifiers();
  1520. iwdev->init_state = INET_NOTIFIER;
  1521. status = i40iw_add_mac_ip(iwdev);
  1522. if (status)
  1523. break;
  1524. iwdev->init_state = IP_ADDR_REGISTERED;
  1525. if (i40iw_register_rdma_device(iwdev)) {
  1526. i40iw_pr_err("register rdma device fail\n");
  1527. break;
  1528. };
  1529. iwdev->init_state = RDMA_DEV_REGISTERED;
  1530. iwdev->iw_status = 1;
  1531. i40iw_port_ibevent(iwdev);
  1532. iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
  1533. if(iwdev->param_wq == NULL)
  1534. break;
  1535. i40iw_pr_info("i40iw_open completed\n");
  1536. return 0;
  1537. } while (0);
  1538. i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
  1539. i40iw_deinit_device(iwdev, false);
  1540. return -ERESTART;
  1541. }
  1542. /**
  1543. * i40iw_l2params_worker - worker for l2 params change
  1544. * @work: work pointer for l2 params
  1545. */
  1546. static void i40iw_l2params_worker(struct work_struct *work)
  1547. {
  1548. struct l2params_work *dwork =
  1549. container_of(work, struct l2params_work, work);
  1550. struct i40iw_device *iwdev = dwork->iwdev;
  1551. i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
  1552. atomic_dec(&iwdev->params_busy);
  1553. kfree(work);
  1554. }
/**
 * i40iw_l2param_change - handle qs handles for qos and mss change
 * @ldev: lan device information
 * @client: client for parameter change
 * @params: new parameters from L2
 *
 * Called by the lan driver (possibly in atomic context - note GFP_ATOMIC
 * below).  Snapshots the new qs handles and mss into a work item and
 * defers the actual change to i40iw_l2params_worker() on param_wq.
 */
static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
				 struct i40e_params *params)
{
	struct i40iw_handler *hdl;
	struct i40iw_l2params *l2params;
	struct l2params_work *work;
	struct i40iw_device *iwdev;
	int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;

	/* Drop this notification if a previous change is still in flight.
	 * NOTE(review): read-then-inc of params_busy is not atomic as a
	 * pair, so two concurrent callers could both pass the check; this
	 * appears to be a deliberate best-effort de-dup - confirm callers
	 * are serialized by the lan driver.
	 */
	if (atomic_read(&iwdev->params_busy))
		return;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	atomic_inc(&iwdev->params_busy);

	work->iwdev = iwdev;
	l2params = &work->l2params;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
		l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;

	/* mtu == 0 means "unchanged": keep the current vsi mss */
	l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;

	INIT_WORK(&work->work, i40iw_l2params_worker);
	queue_work(iwdev->param_wq, &work->work);
}
  1587. /**
  1588. * i40iw_close - client interface operation close for iwarp/uda device
  1589. * @ldev: lan device information
  1590. * @client: client to close
  1591. *
  1592. * Called by the lan driver during the processing of client unregister
  1593. * Destroy and clean up the driver resources
  1594. */
  1595. static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
  1596. {
  1597. struct i40iw_device *iwdev;
  1598. struct i40iw_handler *hdl;
  1599. hdl = i40iw_find_i40e_handler(ldev);
  1600. if (!hdl)
  1601. return;
  1602. iwdev = &hdl->device;
  1603. iwdev->closing = true;
  1604. i40iw_cm_disconnect_all(iwdev);
  1605. destroy_workqueue(iwdev->virtchnl_wq);
  1606. i40iw_deinit_device(iwdev, reset);
  1607. }
/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF
 * Destroy and clean up the VF resources
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	iwdev = (struct i40iw_device *)dev->back_dev;

	/* Locate the slot owned by this vf_id; at most one entry matches */
	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
		/* Clear the slot under the pestat lock so stats readers
		 * never see a half-torn-down vf_dev entry.
		 */
		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev */
		vf_dev_mem.va = tmp_vfdev;
		/* size mirrors the allocation: vfdev header plus the
		 * trailing per-object hmc info array
		 */
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
				  sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}
  1655. /**
  1656. * i40iw_vf_enable - enable a number of VFs
  1657. * @ldev: lan device information
  1658. * @client: client interface instance
  1659. * @num_vfs: number of VFs for the PF
  1660. *
  1661. * Called when the number of VFs changes
  1662. */
  1663. static void i40iw_vf_enable(struct i40e_info *ldev,
  1664. struct i40e_client *client,
  1665. u32 num_vfs)
  1666. {
  1667. struct i40iw_handler *hdl;
  1668. hdl = i40iw_find_i40e_handler(ldev);
  1669. if (!hdl)
  1670. return;
  1671. if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
  1672. hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
  1673. else
  1674. hdl->device.max_enabled_vfs = num_vfs;
  1675. }
  1676. /**
  1677. * i40iw_vf_capable - check if VF capable
  1678. * @ldev: lan device information
  1679. * @client: client interface instance
  1680. * @vf_id: virtual function id
  1681. *
  1682. * Return 1 if a VF slot is available or if VF is already RDMA enabled
  1683. * Return 0 otherwise
  1684. */
  1685. static int i40iw_vf_capable(struct i40e_info *ldev,
  1686. struct i40e_client *client,
  1687. u32 vf_id)
  1688. {
  1689. struct i40iw_handler *hdl;
  1690. struct i40iw_sc_dev *dev;
  1691. unsigned int i;
  1692. hdl = i40iw_find_i40e_handler(ldev);
  1693. if (!hdl)
  1694. return 0;
  1695. dev = &hdl->device.sc_dev;
  1696. for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
  1697. if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
  1698. return 1;
  1699. }
  1700. return 0;
  1701. }
  1702. /**
  1703. * i40iw_virtchnl_receive - receive a message through the virtual channel
  1704. * @ldev: lan device information
  1705. * @client: client interface instance
  1706. * @vf_id: virtual function id associated with the message
  1707. * @msg: message buffer pointer
  1708. * @len: length of the message
  1709. *
  1710. * Invoke virtual channel receive operation for the given msg
  1711. * Return 0 if successful, otherwise return error
  1712. */
  1713. static int i40iw_virtchnl_receive(struct i40e_info *ldev,
  1714. struct i40e_client *client,
  1715. u32 vf_id,
  1716. u8 *msg,
  1717. u16 len)
  1718. {
  1719. struct i40iw_handler *hdl;
  1720. struct i40iw_sc_dev *dev;
  1721. struct i40iw_device *iwdev;
  1722. int ret_code = I40IW_NOT_SUPPORTED;
  1723. if (!len || !msg)
  1724. return I40IW_ERR_PARAM;
  1725. hdl = i40iw_find_i40e_handler(ldev);
  1726. if (!hdl)
  1727. return I40IW_ERR_PARAM;
  1728. dev = &hdl->device.sc_dev;
  1729. iwdev = dev->back_dev;
  1730. if (dev->vchnl_if.vchnl_recv) {
  1731. ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
  1732. if (!dev->is_pf) {
  1733. atomic_dec(&iwdev->vchnl_msgs);
  1734. wake_up(&iwdev->vchnl_waitq);
  1735. }
  1736. }
  1737. return ret_code;
  1738. }
/**
 * i40iw_vf_clear_to_send - wait to send virtual channel message
 * @dev: iwarp device
 *
 * Wait until the virtual channel is clear (no pending vchnl messages)
 * before sending the next message.
 *
 * Returns false if error (channel marked down after timeout)
 * Returns true if clear to send
 */
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev;
	/* NOTE(review): wait_queue_t is the pre-4.13 name for
	 * wait_queue_entry_t - confirm against the kernel version this
	 * driver targets.
	 */
	wait_queue_t wait;

	iwdev = dev->back_dev;

	/* Fast path: nobody waiting and no messages outstanding */
	if (!wq_has_sleeper(&dev->vf_reqs) &&
	    (atomic_read(&iwdev->vchnl_msgs) == 0))
		return true; /* virtual channel is clear */

	/* Register as an exclusive waiter so senders are woken one at a
	 * time, then wait for the outstanding message count to drain.
	 */
	init_wait(&wait);
	add_wait_queue_exclusive(&dev->vf_reqs, &wait);

	/* Timeout means the channel never drained; mark it down so
	 * callers stop trying to send.
	 */
	if (!wait_event_timeout(dev->vf_reqs,
				(atomic_read(&iwdev->vchnl_msgs) == 0),
				I40IW_VCHNL_EVENT_TIMEOUT))
		dev->vchnl_up = false;

	remove_wait_queue(&dev->vf_reqs, &wait);

	return dev->vchnl_up;
}
  1765. /**
  1766. * i40iw_virtchnl_send - send a message through the virtual channel
  1767. * @dev: iwarp device
  1768. * @vf_id: virtual function id associated with the message
  1769. * @msg: virtual channel message buffer pointer
  1770. * @len: length of the message
  1771. *
  1772. * Invoke virtual channel send operation for the given msg
  1773. * Return 0 if successful, otherwise return error
  1774. */
  1775. static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
  1776. u32 vf_id,
  1777. u8 *msg,
  1778. u16 len)
  1779. {
  1780. struct i40iw_device *iwdev;
  1781. struct i40e_info *ldev;
  1782. if (!dev || !dev->back_dev)
  1783. return I40IW_ERR_BAD_PTR;
  1784. iwdev = dev->back_dev;
  1785. ldev = iwdev->ldev;
  1786. if (ldev && ldev->ops && ldev->ops->virtchnl_send)
  1787. return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
  1788. return I40IW_ERR_BAD_PTR;
  1789. }
/* client interface functions */
/* Callback table handed to the i40e lan driver via i40e_register_client();
 * the lan driver invokes these on device open/close, parameter changes,
 * virtual channel traffic and VF lifecycle events.
 */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
	.close = i40iw_close,
	.l2_param_change = i40iw_l2param_change,
	.virtchnl_receive = i40iw_virtchnl_receive,
	.vf_reset = i40iw_vf_reset,
	.vf_enable = i40iw_vf_enable,
	.vf_capable = i40iw_vf_capable
};
  1800. /**
  1801. * i40iw_init_module - driver initialization function
  1802. *
  1803. * First function to call when the driver is loaded
  1804. * Register the driver as i40e client and port mapper client
  1805. */
  1806. static int __init i40iw_init_module(void)
  1807. {
  1808. int ret;
  1809. memset(&i40iw_client, 0, sizeof(i40iw_client));
  1810. i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
  1811. i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
  1812. i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
  1813. i40iw_client.ops = &i40e_ops;
  1814. memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
  1815. i40iw_client.type = I40E_CLIENT_IWARP;
  1816. spin_lock_init(&i40iw_handler_lock);
  1817. ret = i40e_register_client(&i40iw_client);
  1818. return ret;
  1819. }
/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	/* Unregistering triggers i40iw_close() for every open device */
	i40e_unregister_client(&i40iw_client);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);