/* i40iw_main.c */
  1. /*******************************************************************************
  2. *
  3. * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenFabrics.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. *
  33. *******************************************************************************/
  34. #include <linux/module.h>
  35. #include <linux/moduleparam.h>
  36. #include <linux/netdevice.h>
  37. #include <linux/etherdevice.h>
  38. #include <linux/ip.h>
  39. #include <linux/tcp.h>
  40. #include <linux/if_vlan.h>
  41. #include <net/addrconf.h>
  42. #include "i40iw.h"
  43. #include "i40iw_register.h"
  44. #include <net/netevent.h>
/* version reported to the i40e client interface */
#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
#define CLIENT_IW_INTERFACE_VERSION_BUILD 00

/* driver version string, e.g. "0.5.123" */
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 5
#define DRV_VERSION_BUILD 123
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)

/* module parameters, all writable via sysfs (0644) */
static int push_mode;
module_param(push_mode, int, 0644);
MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");

static int resource_profile;
module_param(resource_profile, int, 0644);
MODULE_PARM_DESC(resource_profile,
"Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");

static int max_rdma_vfs = 32;
module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default");

static int mpa_version = 2;
module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");

MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

/* client registration state shared by all i40iw instances */
static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";

/* list of per-lan-device handlers, protected by i40iw_handler_lock */
static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;

static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
u32 vf_id, u8 *msg, u16 len);

/* notifier blocks for inetaddr/inet6addr/netevent notifications;
 * i40iw_notifiers_registered presumably tracks whether they are
 * registered -- registration sites are not visible in this chunk
 */
static struct notifier_block i40iw_inetaddr_notifier = {
	.notifier_call = i40iw_inetaddr_event
};
static struct notifier_block i40iw_inetaddr6_notifier = {
	.notifier_call = i40iw_inet6addr_event
};
static struct notifier_block i40iw_net_notifier = {
	.notifier_call = i40iw_net_event
};
static int i40iw_notifiers_registered;
  89. /**
  90. * i40iw_find_i40e_handler - find a handler given a client info
  91. * @ldev: pointer to a client info
  92. */
  93. static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
  94. {
  95. struct i40iw_handler *hdl;
  96. unsigned long flags;
  97. spin_lock_irqsave(&i40iw_handler_lock, flags);
  98. list_for_each_entry(hdl, &i40iw_handlers, list) {
  99. if (hdl->ldev.netdev == ldev->netdev) {
  100. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  101. return hdl;
  102. }
  103. }
  104. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  105. return NULL;
  106. }
  107. /**
  108. * i40iw_find_netdev - find a handler given a netdev
  109. * @netdev: pointer to net_device
  110. */
  111. struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
  112. {
  113. struct i40iw_handler *hdl;
  114. unsigned long flags;
  115. spin_lock_irqsave(&i40iw_handler_lock, flags);
  116. list_for_each_entry(hdl, &i40iw_handlers, list) {
  117. if (hdl->ldev.netdev == netdev) {
  118. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  119. return hdl;
  120. }
  121. }
  122. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  123. return NULL;
  124. }
/**
 * i40iw_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 *
 * Inserts @hdl at the head of the global i40iw_handlers list under
 * the handler lock.
 */
static void i40iw_add_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_add(&hdl->list, &i40iw_handlers);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
}
/**
 * i40iw_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 *
 * Unlinks @hdl from the global handler list under the handler lock.
 * Always returns 0.
 */
static int i40iw_del_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return 0;
}
  148. /**
  149. * i40iw_enable_intr - set up device interrupts
  150. * @dev: hardware control device structure
  151. * @msix_id: id of the interrupt to be enabled
  152. */
  153. static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
  154. {
  155. u32 val;
  156. val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
  157. I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
  158. (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
  159. if (dev->is_pf)
  160. i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
  161. else
  162. i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
  163. }
  164. /**
  165. * i40iw_dpc - tasklet for aeq and ceq 0
  166. * @data: iwarp device
  167. */
  168. static void i40iw_dpc(unsigned long data)
  169. {
  170. struct i40iw_device *iwdev = (struct i40iw_device *)data;
  171. if (iwdev->msix_shared)
  172. i40iw_process_ceq(iwdev, iwdev->ceqlist);
  173. i40iw_process_aeq(iwdev);
  174. i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
  175. }
  176. /**
  177. * i40iw_ceq_dpc - dpc handler for CEQ
  178. * @data: data points to CEQ
  179. */
  180. static void i40iw_ceq_dpc(unsigned long data)
  181. {
  182. struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
  183. struct i40iw_device *iwdev = iwceq->iwdev;
  184. i40iw_process_ceq(iwdev, iwceq);
  185. i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
  186. }
  187. /**
  188. * i40iw_irq_handler - interrupt handler for aeq and ceq0
  189. * @irq: Interrupt request number
  190. * @data: iwarp device
  191. */
  192. static irqreturn_t i40iw_irq_handler(int irq, void *data)
  193. {
  194. struct i40iw_device *iwdev = (struct i40iw_device *)data;
  195. tasklet_schedule(&iwdev->dpc_tasklet);
  196. return IRQ_HANDLED;
  197. }
  198. /**
  199. * i40iw_destroy_cqp - destroy control qp
  200. * @iwdev: iwarp device
  201. * @create_done: 1 if cqp create poll was success
  202. *
  203. * Issue destroy cqp request and
  204. * free the resources associated with the cqp
  205. */
  206. static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
  207. {
  208. enum i40iw_status_code status = 0;
  209. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  210. struct i40iw_cqp *cqp = &iwdev->cqp;
  211. if (free_hwcqp && dev->cqp_ops->cqp_destroy)
  212. status = dev->cqp_ops->cqp_destroy(dev->cqp);
  213. if (status)
  214. i40iw_pr_err("destroy cqp failed");
  215. i40iw_free_dma_mem(dev->hw, &cqp->sq);
  216. kfree(cqp->scratch_array);
  217. iwdev->cqp.scratch_array = NULL;
  218. kfree(cqp->cqp_requests);
  219. cqp->cqp_requests = NULL;
  220. }
  221. /**
  222. * i40iw_disable_irqs - disable device interrupts
  223. * @dev: hardware control device structure
  224. * @msic_vec: msix vector to disable irq
  225. * @dev_id: parameter to pass to free_irq (used during irq setup)
  226. *
  227. * The function is called when destroying aeq/ceq
  228. */
  229. static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
  230. struct i40iw_msix_vector *msix_vec,
  231. void *dev_id)
  232. {
  233. if (dev->is_pf)
  234. i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
  235. else
  236. i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
  237. free_irq(msix_vec->irq, dev_id);
  238. }
  239. /**
  240. * i40iw_destroy_aeq - destroy aeq
  241. * @iwdev: iwarp device
  242. * @reset: true if called before reset
  243. *
  244. * Issue a destroy aeq request and
  245. * free the resources associated with the aeq
  246. * The function is called during driver unload
  247. */
  248. static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
  249. {
  250. enum i40iw_status_code status = I40IW_ERR_NOT_READY;
  251. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  252. struct i40iw_aeq *aeq = &iwdev->aeq;
  253. if (!iwdev->msix_shared)
  254. i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
  255. if (reset)
  256. goto exit;
  257. if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
  258. status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
  259. if (status)
  260. i40iw_pr_err("destroy aeq failed %d\n", status);
  261. exit:
  262. i40iw_free_dma_mem(dev->hw, &aeq->mem);
  263. }
  264. /**
  265. * i40iw_destroy_ceq - destroy ceq
  266. * @iwdev: iwarp device
  267. * @iwceq: ceq to be destroyed
  268. * @reset: true if called before reset
  269. *
  270. * Issue a destroy ceq request and
  271. * free the resources associated with the ceq
  272. */
  273. static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
  274. struct i40iw_ceq *iwceq,
  275. bool reset)
  276. {
  277. enum i40iw_status_code status;
  278. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  279. if (reset)
  280. goto exit;
  281. status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
  282. if (status) {
  283. i40iw_pr_err("ceq destroy command failed %d\n", status);
  284. goto exit;
  285. }
  286. status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
  287. if (status)
  288. i40iw_pr_err("ceq destroy completion failed %d\n", status);
  289. exit:
  290. i40iw_free_dma_mem(dev->hw, &iwceq->mem);
  291. }
/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 * @reset: true if called before reset
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

	/* ceq 0 may share msix vector 0 with the aeq; its irq was
	 * registered with iwdev as dev_id (see i40iw_configure_ceq_vector),
	 * so it must be freed with the same cookie
	 */
	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq, reset);
		iwceq++;
		i++;
	}

	/* remaining ceqs start at vector index 1 (msix_vec++ in the init
	 * clause); their irqs were registered with the ceq as dev_id
	 */
	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq, reset);
	}
}
  317. /**
  318. * i40iw_destroy_ccq - destroy control cq
  319. * @iwdev: iwarp device
  320. * @reset: true if called before reset
  321. *
  322. * Issue destroy ccq request and
  323. * free the resources associated with the ccq
  324. */
  325. static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
  326. {
  327. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  328. struct i40iw_ccq *ccq = &iwdev->ccq;
  329. enum i40iw_status_code status = 0;
  330. if (!reset)
  331. status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
  332. if (status)
  333. i40iw_pr_err("ccq destroy failed %d\n", status);
  334. i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
  335. }
/* types of hmc objects; i40iw_create_hmc_objs() creates them in this
 * order and the unwind/teardown paths iterate the same table
 */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};
  350. /**
  351. * i40iw_close_hmc_objects_type - delete hmc objects of a given type
  352. * @iwdev: iwarp device
  353. * @obj_type: the hmc object type to be deleted
  354. * @is_pf: true if the function is PF otherwise false
  355. * @reset: true if called before reset
  356. */
  357. static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
  358. enum i40iw_hmc_rsrc_type obj_type,
  359. struct i40iw_hmc_info *hmc_info,
  360. bool is_pf,
  361. bool reset)
  362. {
  363. struct i40iw_hmc_del_obj_info info;
  364. memset(&info, 0, sizeof(info));
  365. info.hmc_info = hmc_info;
  366. info.rsrc_type = obj_type;
  367. info.count = hmc_info->hmc_obj[obj_type].cnt;
  368. info.is_pf = is_pf;
  369. if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
  370. i40iw_pr_err("del obj of type %d failed\n", obj_type);
  371. }
  372. /**
  373. * i40iw_del_hmc_objects - remove all device hmc objects
  374. * @dev: iwarp device
  375. * @hmc_info: hmc_info to free
  376. * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
  377. * by PF on behalf of VF
  378. * @reset: true if called before reset
  379. */
  380. static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
  381. struct i40iw_hmc_info *hmc_info,
  382. bool is_pf,
  383. bool reset)
  384. {
  385. unsigned int i;
  386. for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
  387. i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
  388. }
  389. /**
  390. * i40iw_ceq_handler - interrupt handler for ceq
  391. * @data: ceq pointer
  392. */
  393. static irqreturn_t i40iw_ceq_handler(int irq, void *data)
  394. {
  395. struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
  396. if (iwceq->irq != irq)
  397. i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
  398. tasklet_schedule(&iwceq->dpc_tasklet);
  399. return IRQ_HANDLED;
  400. }
/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 *
 * Thin wrapper over the device's create_hmc_object operation;
 * returns its status code directly.
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}
  411. /**
  412. * i40iw_create_hmc_objs - create all hmc objects for the device
  413. * @iwdev: iwarp device
  414. * @is_pf: true if the function is PF otherwise false
  415. *
  416. * Create the device hmc objects and allocate hmc pages
  417. * Return 0 if successful, otherwise clean up and return error
  418. */
  419. static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
  420. bool is_pf)
  421. {
  422. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  423. struct i40iw_hmc_create_obj_info info;
  424. enum i40iw_status_code status;
  425. int i;
  426. memset(&info, 0, sizeof(info));
  427. info.hmc_info = dev->hmc_info;
  428. info.is_pf = is_pf;
  429. info.entry_type = iwdev->sd_type;
  430. for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
  431. info.rsrc_type = iw_hmc_obj_types[i];
  432. info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
  433. status = i40iw_create_hmc_obj_type(dev, &info);
  434. if (status) {
  435. i40iw_pr_err("create obj type %d status = %d\n",
  436. iw_hmc_obj_types[i], status);
  437. break;
  438. }
  439. }
  440. if (!status)
  441. return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
  442. dev->hmc_fn_id,
  443. true, true));
  444. while (i) {
  445. i--;
  446. /* destroy the hmc objects of a given type */
  447. i40iw_close_hmc_objects_type(dev,
  448. iw_hmc_obj_types[i],
  449. dev->hmc_info,
  450. is_pf,
  451. false);
  452. }
  453. return status;
  454. }
  455. /**
  456. * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
  457. * @iwdev: iwarp device
  458. * @memptr: points to the memory addresses
  459. * @size: size of memory needed
  460. * @mask: mask for the aligned memory
  461. *
  462. * Get aligned memory of the requested size and
  463. * update the memptr to point to the new aligned memory
  464. * Return 0 if successful, otherwise return no memory error
  465. */
  466. enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
  467. struct i40iw_dma_mem *memptr,
  468. u32 size,
  469. u32 mask)
  470. {
  471. unsigned long va, newva;
  472. unsigned long extra;
  473. va = (unsigned long)iwdev->obj_next.va;
  474. newva = va;
  475. if (mask)
  476. newva = ALIGN(va, (mask + 1));
  477. extra = newva - va;
  478. memptr->va = (u8 *)va + extra;
  479. memptr->pa = iwdev->obj_next.pa + extra;
  480. memptr->size = size;
  481. if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
  482. return I40IW_ERR_NO_MEMORY;
  483. iwdev->obj_next.va = memptr->va + size;
  484. iwdev->obj_next.pa = memptr->pa + size;
  485. return 0;
  486. }
  487. /**
  488. * i40iw_create_cqp - create control qp
  489. * @iwdev: iwarp device
  490. *
  491. * Return 0, if the cqp and all the resources associated with it
  492. * are successfully created, otherwise return error
  493. */
  494. static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
  495. {
  496. enum i40iw_status_code status;
  497. u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
  498. struct i40iw_dma_mem mem;
  499. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  500. struct i40iw_cqp_init_info cqp_init_info;
  501. struct i40iw_cqp *cqp = &iwdev->cqp;
  502. u16 maj_err, min_err;
  503. int i;
  504. cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
  505. if (!cqp->cqp_requests)
  506. return I40IW_ERR_NO_MEMORY;
  507. cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
  508. if (!cqp->scratch_array) {
  509. kfree(cqp->cqp_requests);
  510. return I40IW_ERR_NO_MEMORY;
  511. }
  512. dev->cqp = &cqp->sc_cqp;
  513. dev->cqp->dev = dev;
  514. memset(&cqp_init_info, 0, sizeof(cqp_init_info));
  515. status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
  516. (sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
  517. I40IW_CQP_ALIGNMENT);
  518. if (status)
  519. goto exit;
  520. status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
  521. I40IW_HOST_CTX_ALIGNMENT_MASK);
  522. if (status)
  523. goto exit;
  524. dev->cqp->host_ctx_pa = mem.pa;
  525. dev->cqp->host_ctx = mem.va;
  526. /* populate the cqp init info */
  527. cqp_init_info.dev = dev;
  528. cqp_init_info.sq_size = sqsize;
  529. cqp_init_info.sq = cqp->sq.va;
  530. cqp_init_info.sq_pa = cqp->sq.pa;
  531. cqp_init_info.host_ctx_pa = mem.pa;
  532. cqp_init_info.host_ctx = mem.va;
  533. cqp_init_info.hmc_profile = iwdev->resource_profile;
  534. cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
  535. cqp_init_info.scratch_array = cqp->scratch_array;
  536. status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
  537. if (status) {
  538. i40iw_pr_err("cqp init status %d\n", status);
  539. goto exit;
  540. }
  541. status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
  542. if (status) {
  543. i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
  544. status, maj_err, min_err);
  545. goto exit;
  546. }
  547. spin_lock_init(&cqp->req_lock);
  548. INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
  549. INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
  550. /* init the waitq of the cqp_requests and add them to the list */
  551. for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) {
  552. init_waitqueue_head(&cqp->cqp_requests[i].waitq);
  553. list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
  554. }
  555. return 0;
  556. exit:
  557. /* clean up the created resources */
  558. i40iw_destroy_cqp(iwdev, false);
  559. return status;
  560. }
  561. /**
  562. * i40iw_create_ccq - create control cq
  563. * @iwdev: iwarp device
  564. *
  565. * Return 0, if the ccq and the resources associated with it
  566. * are successfully created, otherwise return error
  567. */
  568. static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
  569. {
  570. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  571. struct i40iw_dma_mem mem;
  572. enum i40iw_status_code status;
  573. struct i40iw_ccq_init_info info;
  574. struct i40iw_ccq *ccq = &iwdev->ccq;
  575. memset(&info, 0, sizeof(info));
  576. dev->ccq = &ccq->sc_cq;
  577. dev->ccq->dev = dev;
  578. info.dev = dev;
  579. ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
  580. ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
  581. status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
  582. ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
  583. if (status)
  584. goto exit;
  585. status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
  586. I40IW_SHADOWAREA_MASK);
  587. if (status)
  588. goto exit;
  589. ccq->sc_cq.back_cq = (void *)ccq;
  590. /* populate the ccq init info */
  591. info.cq_base = ccq->mem_cq.va;
  592. info.cq_pa = ccq->mem_cq.pa;
  593. info.num_elem = IW_CCQ_SIZE;
  594. info.shadow_area = mem.va;
  595. info.shadow_area_pa = mem.pa;
  596. info.ceqe_mask = false;
  597. info.ceq_id_valid = true;
  598. info.shadow_read_threshold = 16;
  599. status = dev->ccq_ops->ccq_init(dev->ccq, &info);
  600. if (!status)
  601. status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
  602. exit:
  603. if (status)
  604. i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
  605. return status;
  606. }
  607. /**
  608. * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
  609. * @iwdev: iwarp device
  610. * @msix_vec: interrupt vector information
  611. * @iwceq: ceq associated with the vector
  612. * @ceq_id: the id number of the iwceq
  613. *
  614. * Allocate interrupt resources and enable irq handling
  615. * Return 0 if successful, otherwise return error
  616. */
  617. static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
  618. struct i40iw_ceq *iwceq,
  619. u32 ceq_id,
  620. struct i40iw_msix_vector *msix_vec)
  621. {
  622. enum i40iw_status_code status;
  623. if (iwdev->msix_shared && !ceq_id) {
  624. tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
  625. status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
  626. } else {
  627. tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
  628. status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
  629. }
  630. if (status) {
  631. i40iw_pr_err("ceq irq config fail\n");
  632. return I40IW_ERR_CONFIG;
  633. }
  634. msix_vec->ceq_id = ceq_id;
  635. msix_vec->cpu_affinity = 0;
  636. return 0;
  637. }
  638. /**
  639. * i40iw_create_ceq - create completion event queue
  640. * @iwdev: iwarp device
  641. * @iwceq: pointer to the ceq resources to be created
  642. * @ceq_id: the id number of the iwceq
  643. *
  644. * Return 0, if the ceq and the resources associated with it
  645. * are successfully created, otherwise return error
  646. */
  647. static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
  648. struct i40iw_ceq *iwceq,
  649. u32 ceq_id)
  650. {
  651. enum i40iw_status_code status;
  652. struct i40iw_ceq_init_info info;
  653. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  654. u64 scratch;
  655. memset(&info, 0, sizeof(info));
  656. info.ceq_id = ceq_id;
  657. iwceq->iwdev = iwdev;
  658. iwceq->mem.size = sizeof(struct i40iw_ceqe) *
  659. iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  660. status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
  661. I40IW_CEQ_ALIGNMENT);
  662. if (status)
  663. goto exit;
  664. info.ceq_id = ceq_id;
  665. info.ceqe_base = iwceq->mem.va;
  666. info.ceqe_pa = iwceq->mem.pa;
  667. info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  668. iwceq->sc_ceq.ceq_id = ceq_id;
  669. info.dev = dev;
  670. scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
  671. status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
  672. if (!status)
  673. status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
  674. exit:
  675. if (status)
  676. i40iw_free_dma_mem(dev->hw, &iwceq->mem);
  677. return status;
  678. }
/**
 * i40iw_request_reset - request a device reset from the lan driver
 * @iwdev: iwarp device
 *
 * Forwards the reset request to the lan driver through its
 * client-interface request_reset callback.
 * NOTE(review): the trailing argument (1) is the reset level defined
 * by the i40e client API - confirm its meaning there.
 */
void i40iw_request_reset(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	ldev->ops->request_reset(ldev, iwdev->client, 1);
}
/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	/* the lan driver must map the queue vectors first; without the
	 * setup_qvlist callback there is nothing to configure
	 */
	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	/* one ceq per msix vector, capped by what the hmc supports */
	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
	/* with msix_shared, vector 0 serves both aeq and ceq 0;
	 * otherwise vector 0 is dedicated to the aeq and ceqs start at 1
	 */
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}
		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq, false);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}
exit:
	if (status) {
		if (!iwdev->ceqs_count) {
			kfree(iwdev->ceqlist);
			iwdev->ceqlist = NULL;
		} else {
			/* at least one ceq came up - treat as success */
			status = 0;
		}
	}
	return status;
}
  747. /**
  748. * i40iw_configure_aeq_vector - set up the msix vector for aeq
  749. * @iwdev: iwarp device
  750. *
  751. * Allocate interrupt resources and enable irq handling
  752. * Return 0 if successful, otherwise return error
  753. */
  754. static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
  755. {
  756. struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
  757. u32 ret = 0;
  758. if (!iwdev->msix_shared) {
  759. tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
  760. ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
  761. }
  762. if (ret) {
  763. i40iw_pr_err("aeq irq config fail\n");
  764. return I40IW_ERR_CONFIG;
  765. }
  766. return 0;
  767. }
/**
 * i40iw_create_aeq - create async event queue
 * @iwdev: iwarp device
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	struct i40iw_aeq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	u64 scratch = 0;
	u32 aeq_size;

	/* two entries per qp plus one per cq; NOTE(review): sizing
	 * rationale inferred from the arithmetic - confirm against spec
	 */
	aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	memset(&info, 0, sizeof(info));
	aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
	status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
					I40IW_AEQ_ALIGNMENT);
	if (status)
		goto exit;
	info.aeqe_base = aeq->mem.va;
	info.aeq_elem_pa = aeq->mem.pa;
	info.elem_cnt = aeq_size;
	info.dev = dev;
	status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
	if (status)
		goto exit;
	status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
	if (!status)
		status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
exit:
	/* NOTE(review): on the first goto the DMA buffer was never
	 * allocated - relies on i40iw_free_dma_mem() tolerating that
	 */
	if (status)
		i40iw_free_dma_mem(dev->hw, &aeq->mem);
	return status;
}
  806. /**
  807. * i40iw_setup_aeq - set up the device aeq
  808. * @iwdev: iwarp device
  809. *
  810. * Create the aeq and configure its msix interrupt vector
  811. * Return 0 if successful, otherwise return error
  812. */
  813. static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
  814. {
  815. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  816. enum i40iw_status_code status;
  817. status = i40iw_create_aeq(iwdev);
  818. if (status)
  819. return status;
  820. status = i40iw_configure_aeq_vector(iwdev);
  821. if (status) {
  822. i40iw_destroy_aeq(iwdev, false);
  823. return status;
  824. }
  825. if (!iwdev->msix_shared)
  826. i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
  827. return 0;
  828. }
  829. /**
  830. * i40iw_initialize_ilq - create iwarp local queue for cm
  831. * @iwdev: iwarp device
  832. *
  833. * Return 0 if successful, otherwise return error
  834. */
  835. static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
  836. {
  837. struct i40iw_puda_rsrc_info info;
  838. enum i40iw_status_code status;
  839. info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
  840. info.cq_id = 1;
  841. info.qp_id = 0;
  842. info.count = 1;
  843. info.pd_id = 1;
  844. info.sq_size = 8192;
  845. info.rq_size = 8192;
  846. info.buf_size = 1024;
  847. info.tx_buf_cnt = 16384;
  848. info.mss = iwdev->mss;
  849. info.receive = i40iw_receive_ilq;
  850. info.xmit_complete = i40iw_free_sqbuf;
  851. status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
  852. if (status)
  853. i40iw_pr_err("ilq create fail\n");
  854. return status;
  855. }
  856. /**
  857. * i40iw_initialize_ieq - create iwarp exception queue
  858. * @iwdev: iwarp device
  859. *
  860. * Return 0 if successful, otherwise return error
  861. */
  862. static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
  863. {
  864. struct i40iw_puda_rsrc_info info;
  865. enum i40iw_status_code status;
  866. info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
  867. info.cq_id = 2;
  868. info.qp_id = iwdev->sc_dev.exception_lan_queue;
  869. info.count = 1;
  870. info.pd_id = 2;
  871. info.sq_size = 8192;
  872. info.rq_size = 8192;
  873. info.buf_size = 2048;
  874. info.mss = iwdev->mss;
  875. info.tx_buf_cnt = 16384;
  876. status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
  877. if (status)
  878. i40iw_pr_err("ieq create fail\n");
  879. return status;
  880. }
  881. /**
  882. * i40iw_hmc_setup - create hmc objects for the device
  883. * @iwdev: iwarp device
  884. *
  885. * Set up the device private memory space for the number and size of
  886. * the hmc objects and create the objects
  887. * Return 0 if successful, otherwise return error
  888. */
  889. static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
  890. {
  891. enum i40iw_status_code status;
  892. iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
  893. status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
  894. if (status)
  895. goto exit;
  896. status = i40iw_create_hmc_objs(iwdev, true);
  897. if (status)
  898. goto exit;
  899. iwdev->init_state = HMC_OBJS_CREATED;
  900. exit:
  901. return status;
  902. }
/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 *
 * Frees the init-time allocations and NULLs the pointers so a
 * repeated teardown or a later kfree(NULL) is harmless.
 */
static void i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	/* DMA backing used for the aligned fpm query/commit buffers */
	i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	/* hmc_info_mem also backs pble_rsrc and hmc_obj[] - free last */
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}
/**
 * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
 * @iwdev: iwarp device
 * @idx: the index of the mac ip address to delete
 *
 * Posts an OP_DELETE_LOCAL_MAC_IPADDR_ENTRY command on the cqp;
 * failures are logged but not propagated (void teardown helper).
 */
static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	/* NOTE(review): second arg true appears to request a waited
	 * cqp request - confirm in i40iw_get_cqp_request()
	 */
	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return;
	}
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
	cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
}
  949. /**
  950. * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
  951. * @iwdev: iwarp device
  952. * @mac_addr: pointer to mac address
  953. * @idx: the index of the mac ip address to add
  954. */
  955. static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
  956. u8 *mac_addr,
  957. u8 idx)
  958. {
  959. struct i40iw_local_mac_ipaddr_entry_info *info;
  960. struct i40iw_cqp *iwcqp = &iwdev->cqp;
  961. struct i40iw_cqp_request *cqp_request;
  962. struct cqp_commands_info *cqp_info;
  963. enum i40iw_status_code status = 0;
  964. cqp_request = i40iw_get_cqp_request(iwcqp, true);
  965. if (!cqp_request) {
  966. i40iw_pr_err("cqp_request memory failed\n");
  967. return I40IW_ERR_NO_MEMORY;
  968. }
  969. cqp_info = &cqp_request->info;
  970. cqp_info->post_sq = 1;
  971. info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
  972. ether_addr_copy(info->mac_addr, mac_addr);
  973. info->entry_idx = idx;
  974. cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  975. cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
  976. cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
  977. cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  978. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  979. if (status)
  980. i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
  981. return status;
  982. }
/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}
	/* increment refcount, because we need the cqp request ret value
	 * (compl_info) after i40iw_handle_cqp_op() completes
	 */
	atomic_inc(&cqp_request->refcount);
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		/* hw returns the allocated table index in op_ret_val */
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}
  1020. /**
  1021. * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
  1022. * @iwdev: iwarp device
  1023. * @macaddr: pointer to mac address
  1024. *
  1025. * Allocate a mac ip address entry and add it to the hw table
  1026. * Return 0 if successful, otherwise return error
  1027. */
  1028. static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
  1029. u8 *macaddr)
  1030. {
  1031. enum i40iw_status_code status;
  1032. status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
  1033. if (!status) {
  1034. status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
  1035. (u8)iwdev->mac_ip_table_idx);
  1036. if (status)
  1037. i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
  1038. }
  1039. return status;
  1040. }
/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 *
 * Walks all netdevs that are this device's netdev, or a vlan stacked
 * on it, and pushes each of their ipv6 addresses into the hw arp cache.
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		/* match our netdev or any vlan on top of it, UP only */
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			/* NOTE(review): addr_list is walked without taking
			 * idev->lock - confirm rcu protection suffices here
			 */
			list_for_each_entry(ifp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				/* hw arp cache wants host byte order */
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}
/**
 * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: iwarp device
 *
 * Walks all netdevs that are this device's netdev, or a vlan stacked
 * on it, and pushes each of their ipv4 addresses into the hw arp cache.
 */
static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
{
	struct net_device *dev;
	struct in_device *idev;
	bool got_lock = true;
	u32 ip_addr;

	/* NOTE(review): when the rtnl lock cannot be taken the netdev
	 * list is walked anyway without it - presumably a deliberate
	 * deadlock-avoidance tradeoff; confirm
	 */
	if (!rtnl_trylock())
		got_lock = false;
	for_each_netdev(&init_net, dev) {
		/* match our netdev or any vlan on top of it, UP only */
		if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
		     (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
			idev = in_dev_get(dev);
			for_ifa(idev) {
				i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
					    "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
					    rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
				/* hw arp cache wants host byte order */
				ip_addr = ntohl(ifa->ifa_address);
				i40iw_manage_arp_cache(iwdev,
						       dev->dev_addr,
						       &ip_addr,
						       true,
						       I40IW_ARP_ADD);
			}
			endfor_ifa(idev);
			in_dev_put(idev);
		}
	}
	if (got_lock)
		rtnl_unlock();
}
  1111. /**
  1112. * i40iw_add_mac_ip - add mac and ip addresses
  1113. * @iwdev: iwarp device
  1114. *
  1115. * Create and add a mac ip address entry to the hw table and
  1116. * ipv4/ipv6 addresses to the arp cache
  1117. * Return 0 if successful, otherwise return error
  1118. */
  1119. static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
  1120. {
  1121. struct net_device *netdev = iwdev->netdev;
  1122. enum i40iw_status_code status;
  1123. status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
  1124. if (status)
  1125. return status;
  1126. i40iw_add_ipv4_addr(iwdev);
  1127. i40iw_add_ipv6_addr(iwdev);
  1128. return 0;
  1129. }
/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 *
 * Polls the firmware-load status and the three protocol-engine cpu
 * status registers until all cpus report 0x80, retrying up to 14
 * times with a one second delay between reads.
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		/* 0x80 on all three cpus means the pe is ready */
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break;	/* SUCCESS */
		mdelay(1000);
		retrycount++;
	} while (retrycount < 14);
	/* NOTE(review): undocumented magic register write
	 * (0xb4040 <- 0x4C104C5) performed even on timeout - confirm
	 * its purpose against the hw documentation
	 */
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}
/**
 * i40iw_initialize_dev - initialize device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_device_init_info info;
	struct i40iw_dma_mem mem;
	u32 size;

	memset(&info, 0, sizeof(info));
	/* single allocation: pble_rsrc first, hmc_obj[] after it.
	 * NOTE(review): size also reserves room for an i40iw_hmc_info,
	 * yet dev->hmc_info points at iwdev->hw.hmc below - confirm
	 * the extra space is intentional
	 */
	size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
	       (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
	iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!iwdev->hmc_info_mem) {
		i40iw_pr_err("memory alloc fail\n");
		return I40IW_ERR_NO_MEMORY;
	}
	iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
	dev->hmc_info = &iwdev->hw.hmc;
	dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
	/* carve aligned fpm query/commit buffers out of iwdev->obj_mem */
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	if (status)
		goto exit;
	info.fpm_query_buf_pa = mem.pa;
	info.fpm_query_buf = mem.va;
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
				       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
	if (status)
		goto exit;
	info.fpm_commit_buf_pa = mem.pa;
	info.fpm_commit_buf = mem.va;
	info.hmc_fn_id = ldev->fid;
	/* non-zero ftype means this function is a vf */
	info.is_pf = (ldev->ftype) ? false : true;
	info.bar0 = ldev->hw_addr;
	info.hw = &iwdev->hw;
	info.debug_mask = debug;
	info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle;
	info.exception_lan_queue = 1;
	info.vchnl_send = i40iw_virtchnl_send;
	status = i40iw_device_init(&iwdev->sc_dev, &info);
exit:
	if (status) {
		kfree(iwdev->hmc_info_mem);
		iwdev->hmc_info_mem = NULL;
	}
	return status;
}
  1215. /**
  1216. * i40iw_register_notifiers - register tcp ip notifiers
  1217. */
  1218. static void i40iw_register_notifiers(void)
  1219. {
  1220. if (!i40iw_notifiers_registered) {
  1221. register_inetaddr_notifier(&i40iw_inetaddr_notifier);
  1222. register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
  1223. register_netevent_notifier(&i40iw_net_notifier);
  1224. }
  1225. i40iw_notifiers_registered++;
  1226. }
  1227. /**
  1228. * i40iw_save_msix_info - copy msix vector information to iwarp device
  1229. * @iwdev: iwarp device
  1230. * @ldev: lan device information
  1231. *
  1232. * Allocate iwdev msix table and copy the ldev msix info to the table
  1233. * Return 0 if successful, otherwise return error
  1234. */
  1235. static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
  1236. struct i40e_info *ldev)
  1237. {
  1238. struct i40e_qvlist_info *iw_qvlist;
  1239. struct i40e_qv_info *iw_qvinfo;
  1240. u32 ceq_idx;
  1241. u32 i;
  1242. u32 size;
  1243. iwdev->msix_count = ldev->msix_count;
  1244. size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
  1245. size += sizeof(struct i40e_qvlist_info);
  1246. size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;
  1247. iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
  1248. if (!iwdev->iw_msixtbl)
  1249. return I40IW_ERR_NO_MEMORY;
  1250. iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
  1251. iw_qvlist = iwdev->iw_qvlist;
  1252. iw_qvinfo = iw_qvlist->qv_info;
  1253. iw_qvlist->num_vectors = iwdev->msix_count;
  1254. if (iwdev->msix_count <= num_online_cpus())
  1255. iwdev->msix_shared = true;
  1256. for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
  1257. iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
  1258. iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
  1259. if (i == 0) {
  1260. iw_qvinfo->aeq_idx = 0;
  1261. if (iwdev->msix_shared)
  1262. iw_qvinfo->ceq_idx = ceq_idx++;
  1263. else
  1264. iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
  1265. } else {
  1266. iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
  1267. iw_qvinfo->ceq_idx = ceq_idx++;
  1268. }
  1269. iw_qvinfo->itr_idx = 3;
  1270. iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
  1271. }
  1272. return 0;
  1273. }
/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 * @reset: true if called before reset
 * @del_hdl: true if delete hdl entry
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 *
 * The switch deliberately falls through: each init_state case tears
 * down its own resource and continues with everything set up before it.
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl)
{
	struct i40e_info *ldev = iwdev->ldev;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);

	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);	/* report port state change */
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		/* fallthrough */
	case IP_ADDR_REGISTERED:
		/* on reset the hw table is going away anyway */
		if (!reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		/* fallthrough */
	case INET_NOTIFIER:
		/* NOTE(review): notifiers are unregistered whenever the
		 * count is non-zero, not only when it drops to zero -
		 * confirm this is intended for multiple instances
		 */
		if (i40iw_notifiers_registered > 0) {
			i40iw_notifiers_registered--;
			unregister_netevent_notifier(&i40iw_net_notifier);
			unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
			unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
		}
		/* fallthrough */
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev, reset);
		/* fallthrough */
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev, reset);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev, reset);
		/* fallthrough */
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		/* fallthrough */
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, !reset);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (dev->is_pf)
			i40iw_hw_stats_del_timer(dev);
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
		/* fallthrough */
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	if (del_hdl)
		i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}
/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	memcpy(&hdl->ldev, ldev, sizeof(*ldev));
	/* profile 1 is promoted to 2; NOTE(review): rationale not
	 * visible in this file - confirm against module param docs
	 */
	if (resource_profile == 1)
		resource_profile = 2;

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
	    (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
	    I40IW_HMC_PROFILE_DEFAULT;
	/* vfs only get rdma resources on non-default profiles */
	iwdev->max_rdma_vfs =
		(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
	iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
	/* doorbell offset in BAR 0 differs between pf and vf */
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		goto exit;
	iwdev->hw.dev_context = (void *)ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	/* obj_mem later carved up by i40iw_obj_aligned_mem() */
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;
	iwdev->push_mode = push_mode;

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	if (status) {
		kfree(iwdev->iw_msixtbl);
		/* NOTE(review): obj_mem may never have been allocated
		 * on this path - relies on i40iw_free_dma_mem handling
		 * that
		 */
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}
  1402. /**
  1403. * i40iw_open - client interface operation open for iwarp/uda device
  1404. * @ldev: lan device information
  1405. * @client: iwarp client information, provided during registration
  1406. *
  1407. * Called by the lan driver during the processing of client register
  1408. * Create device resources, set up queues, pble and hmc objects and
  1409. * register the device with the ib verbs interface
  1410. * Return 0 if successful, otherwise return error
  1411. */
  1412. static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
  1413. {
  1414. struct i40iw_device *iwdev;
  1415. struct i40iw_sc_dev *dev;
  1416. enum i40iw_status_code status;
  1417. struct i40iw_handler *hdl;
  1418. hdl = i40iw_find_netdev(ldev->netdev);
  1419. if (hdl)
  1420. return 0;
  1421. hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
  1422. if (!hdl)
  1423. return -ENOMEM;
  1424. iwdev = &hdl->device;
  1425. iwdev->hdl = hdl;
  1426. dev = &iwdev->sc_dev;
  1427. i40iw_setup_cm_core(iwdev);
  1428. dev->back_dev = (void *)iwdev;
  1429. iwdev->ldev = &hdl->ldev;
  1430. iwdev->client = client;
  1431. mutex_init(&iwdev->pbl_mutex);
  1432. i40iw_add_handler(hdl);
  1433. do {
  1434. status = i40iw_setup_init_state(hdl, ldev, client);
  1435. if (status)
  1436. break;
  1437. iwdev->init_state = INITIAL_STATE;
  1438. if (dev->is_pf)
  1439. i40iw_wait_pe_ready(dev->hw);
  1440. status = i40iw_create_cqp(iwdev);
  1441. if (status)
  1442. break;
  1443. iwdev->init_state = CQP_CREATED;
  1444. status = i40iw_hmc_setup(iwdev);
  1445. if (status)
  1446. break;
  1447. status = i40iw_create_ccq(iwdev);
  1448. if (status)
  1449. break;
  1450. iwdev->init_state = CCQ_CREATED;
  1451. status = i40iw_initialize_ilq(iwdev);
  1452. if (status)
  1453. break;
  1454. iwdev->init_state = ILQ_CREATED;
  1455. status = i40iw_initialize_ieq(iwdev);
  1456. if (status)
  1457. break;
  1458. iwdev->init_state = IEQ_CREATED;
  1459. status = i40iw_setup_aeq(iwdev);
  1460. if (status)
  1461. break;
  1462. iwdev->init_state = AEQ_CREATED;
  1463. status = i40iw_setup_ceqs(iwdev, ldev);
  1464. if (status)
  1465. break;
  1466. iwdev->init_state = CEQ_CREATED;
  1467. status = i40iw_initialize_hw_resources(iwdev);
  1468. if (status)
  1469. break;
  1470. dev->ccq_ops->ccq_arm(dev->ccq);
  1471. status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
  1472. if (status)
  1473. break;
  1474. iwdev->virtchnl_wq = create_singlethread_workqueue("iwvch");
  1475. i40iw_register_notifiers();
  1476. iwdev->init_state = INET_NOTIFIER;
  1477. status = i40iw_add_mac_ip(iwdev);
  1478. if (status)
  1479. break;
  1480. iwdev->init_state = IP_ADDR_REGISTERED;
  1481. if (i40iw_register_rdma_device(iwdev)) {
  1482. i40iw_pr_err("register rdma device fail\n");
  1483. break;
  1484. };
  1485. iwdev->init_state = RDMA_DEV_REGISTERED;
  1486. iwdev->iw_status = 1;
  1487. i40iw_port_ibevent(iwdev);
  1488. i40iw_pr_info("i40iw_open completed\n");
  1489. return 0;
  1490. } while (0);
  1491. i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
  1492. i40iw_deinit_device(iwdev, false, false);
  1493. return -ERESTART;
  1494. }
  1495. /**
  1496. * i40iw_l2param_change : handle qs handles for qos and mss change
  1497. * @ldev: lan device information
  1498. * @client: client for paramater change
  1499. * @params: new parameters from L2
  1500. */
  1501. static void i40iw_l2param_change(struct i40e_info *ldev,
  1502. struct i40e_client *client,
  1503. struct i40e_params *params)
  1504. {
  1505. struct i40iw_handler *hdl;
  1506. struct i40iw_device *iwdev;
  1507. hdl = i40iw_find_i40e_handler(ldev);
  1508. if (!hdl)
  1509. return;
  1510. iwdev = &hdl->device;
  1511. if (params->mtu)
  1512. iwdev->mss = params->mtu - I40IW_MTU_TO_MSS;
  1513. }
  1514. /**
  1515. * i40iw_close - client interface operation close for iwarp/uda device
  1516. * @ldev: lan device information
  1517. * @client: client to close
  1518. *
  1519. * Called by the lan driver during the processing of client unregister
  1520. * Destroy and clean up the driver resources
  1521. */
  1522. static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
  1523. {
  1524. struct i40iw_device *iwdev;
  1525. struct i40iw_handler *hdl;
  1526. hdl = i40iw_find_i40e_handler(ldev);
  1527. if (!hdl)
  1528. return;
  1529. iwdev = &hdl->device;
  1530. destroy_workqueue(iwdev->virtchnl_wq);
  1531. i40iw_deinit_device(iwdev, reset, true);
  1532. }
/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF.
 * Destroy and clean up the VF resources: HMC objects, the HMC
 * function itself, and finally the vf_dev bookkeeping memory.
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	/* locate the vf_dev slot belonging to this vf_id */
	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
		/*
		 * Clear the slot under the stats lock so concurrent
		 * stats readers never see a half-torn-down VF entry.
		 */
		spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev (allocated with trailing hmc_obj_info array) */
		vf_dev_mem.va = tmp_vfdev;
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
				  sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}
  1578. /**
  1579. * i40iw_vf_enable - enable a number of VFs
  1580. * @ldev: lan device information
  1581. * @client: client interface instance
  1582. * @num_vfs: number of VFs for the PF
  1583. *
  1584. * Called when the number of VFs changes
  1585. */
  1586. static void i40iw_vf_enable(struct i40e_info *ldev,
  1587. struct i40e_client *client,
  1588. u32 num_vfs)
  1589. {
  1590. struct i40iw_handler *hdl;
  1591. hdl = i40iw_find_i40e_handler(ldev);
  1592. if (!hdl)
  1593. return;
  1594. if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
  1595. hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
  1596. else
  1597. hdl->device.max_enabled_vfs = num_vfs;
  1598. }
  1599. /**
  1600. * i40iw_vf_capable - check if VF capable
  1601. * @ldev: lan device information
  1602. * @client: client interface instance
  1603. * @vf_id: virtual function id
  1604. *
  1605. * Return 1 if a VF slot is available or if VF is already RDMA enabled
  1606. * Return 0 otherwise
  1607. */
  1608. static int i40iw_vf_capable(struct i40e_info *ldev,
  1609. struct i40e_client *client,
  1610. u32 vf_id)
  1611. {
  1612. struct i40iw_handler *hdl;
  1613. struct i40iw_sc_dev *dev;
  1614. unsigned int i;
  1615. hdl = i40iw_find_i40e_handler(ldev);
  1616. if (!hdl)
  1617. return 0;
  1618. dev = &hdl->device.sc_dev;
  1619. for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
  1620. if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
  1621. return 1;
  1622. }
  1623. return 0;
  1624. }
  1625. /**
  1626. * i40iw_virtchnl_receive - receive a message through the virtual channel
  1627. * @ldev: lan device information
  1628. * @client: client interface instance
  1629. * @vf_id: virtual function id associated with the message
  1630. * @msg: message buffer pointer
  1631. * @len: length of the message
  1632. *
  1633. * Invoke virtual channel receive operation for the given msg
  1634. * Return 0 if successful, otherwise return error
  1635. */
  1636. static int i40iw_virtchnl_receive(struct i40e_info *ldev,
  1637. struct i40e_client *client,
  1638. u32 vf_id,
  1639. u8 *msg,
  1640. u16 len)
  1641. {
  1642. struct i40iw_handler *hdl;
  1643. struct i40iw_sc_dev *dev;
  1644. struct i40iw_device *iwdev;
  1645. int ret_code = I40IW_NOT_SUPPORTED;
  1646. if (!len || !msg)
  1647. return I40IW_ERR_PARAM;
  1648. hdl = i40iw_find_i40e_handler(ldev);
  1649. if (!hdl)
  1650. return I40IW_ERR_PARAM;
  1651. dev = &hdl->device.sc_dev;
  1652. iwdev = dev->back_dev;
  1653. if (dev->vchnl_if.vchnl_recv) {
  1654. ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
  1655. if (!dev->is_pf) {
  1656. atomic_dec(&iwdev->vchnl_msgs);
  1657. wake_up(&iwdev->vchnl_waitq);
  1658. }
  1659. }
  1660. return ret_code;
  1661. }
/**
 * i40iw_vf_clear_to_send - wait to send virtual channel message
 * @dev: iwarp device
 *
 * Wait until the virtual channel is clear
 * before sending the next message
 *
 * Returns false if error
 * Returns true if clear to send
 */
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev;
	wait_queue_t wait;

	iwdev = dev->back_dev;
	/* fast path: nobody queued and no message outstanding */
	if (!wq_has_sleeper(&dev->vf_reqs) &&
	    (atomic_read(&iwdev->vchnl_msgs) == 0))
		return true; /* virtual channel is clear */

	/*
	 * Queue exclusively on vf_reqs so pending senders are woken
	 * one at a time, then wait for the outstanding message count
	 * to drain.  On timeout the channel is marked down.
	 */
	init_wait(&wait);
	add_wait_queue_exclusive(&dev->vf_reqs, &wait);
	if (!wait_event_timeout(dev->vf_reqs,
				(atomic_read(&iwdev->vchnl_msgs) == 0),
				I40IW_VCHNL_EVENT_TIMEOUT))
		dev->vchnl_up = false;
	remove_wait_queue(&dev->vf_reqs, &wait);
	return dev->vchnl_up;
}
  1688. /**
  1689. * i40iw_virtchnl_send - send a message through the virtual channel
  1690. * @dev: iwarp device
  1691. * @vf_id: virtual function id associated with the message
  1692. * @msg: virtual channel message buffer pointer
  1693. * @len: length of the message
  1694. *
  1695. * Invoke virtual channel send operation for the given msg
  1696. * Return 0 if successful, otherwise return error
  1697. */
  1698. static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
  1699. u32 vf_id,
  1700. u8 *msg,
  1701. u16 len)
  1702. {
  1703. struct i40iw_device *iwdev;
  1704. struct i40e_info *ldev;
  1705. if (!dev || !dev->back_dev)
  1706. return I40IW_ERR_BAD_PTR;
  1707. iwdev = dev->back_dev;
  1708. ldev = iwdev->ldev;
  1709. if (ldev && ldev->ops && ldev->ops->virtchnl_send)
  1710. return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
  1711. return I40IW_ERR_BAD_PTR;
  1712. }
/* client interface functions registered with the i40e LAN driver */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,			/* device bring-up */
	.close = i40iw_close,			/* device teardown */
	.l2_param_change = i40iw_l2param_change, /* MTU/MSS updates */
	.virtchnl_receive = i40iw_virtchnl_receive, /* PF<->VF messages */
	.vf_reset = i40iw_vf_reset,		/* per-VF cleanup */
	.vf_enable = i40iw_vf_enable,		/* VF count change */
	.vf_capable = i40iw_vf_capable		/* VF slot availability */
};
  1723. /**
  1724. * i40iw_init_module - driver initialization function
  1725. *
  1726. * First function to call when the driver is loaded
  1727. * Register the driver as i40e client and port mapper client
  1728. */
  1729. static int __init i40iw_init_module(void)
  1730. {
  1731. int ret;
  1732. memset(&i40iw_client, 0, sizeof(i40iw_client));
  1733. i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
  1734. i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
  1735. i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
  1736. i40iw_client.ops = &i40e_ops;
  1737. memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
  1738. i40iw_client.type = I40E_CLIENT_IWARP;
  1739. spin_lock_init(&i40iw_handler_lock);
  1740. ret = i40e_register_client(&i40iw_client);
  1741. return ret;
  1742. }
/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	i40e_unregister_client(&i40iw_client);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);