/* i40iw_main.c — Intel(R) Ethernet Connection X722 iWARP RDMA driver, main module */
  1. /*******************************************************************************
  2. *
  3. * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenFabrics.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. *
  33. *******************************************************************************/
  34. #include <linux/module.h>
  35. #include <linux/moduleparam.h>
  36. #include <linux/netdevice.h>
  37. #include <linux/etherdevice.h>
  38. #include <linux/ip.h>
  39. #include <linux/tcp.h>
  40. #include <linux/if_vlan.h>
  41. #include <net/addrconf.h>
  42. #include "i40iw.h"
  43. #include "i40iw_register.h"
  44. #include <net/netevent.h>
  45. #define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
  46. #define CLIENT_IW_INTERFACE_VERSION_MINOR 01
  47. #define CLIENT_IW_INTERFACE_VERSION_BUILD 00
  48. #define DRV_VERSION_MAJOR 0
  49. #define DRV_VERSION_MINOR 5
  50. #define DRV_VERSION_BUILD 123
  51. #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
  52. __stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
/* Module parameters — all runtime-tunable via sysfs (mode 0644) */
static int push_mode;
module_param(push_mode, int, 0644);
MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");

static int resource_profile;
module_param(resource_profile, int, 0644);
MODULE_PARM_DESC(resource_profile,
"Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");

static int max_rdma_vfs = 32;
module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default");

static int mpa_version = 2;
module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");

MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

/* Client object registered with the i40e LAN driver */
static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";

/* Global list of per-port handlers; guarded by i40iw_handler_lock */
static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;

static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
u32 vf_id, u8 *msg, u16 len);

/* Notifier blocks for IPv4/IPv6 address changes, neighbor events and
 * netdevice lifecycle events (callbacks defined elsewhere in the driver).
 */
static struct notifier_block i40iw_inetaddr_notifier = {
.notifier_call = i40iw_inetaddr_event
};
static struct notifier_block i40iw_inetaddr6_notifier = {
.notifier_call = i40iw_inet6addr_event
};
static struct notifier_block i40iw_net_notifier = {
.notifier_call = i40iw_net_event
};
static struct notifier_block i40iw_netdevice_notifier = {
.notifier_call = i40iw_netdevice_event
};
  90. /**
  91. * i40iw_find_i40e_handler - find a handler given a client info
  92. * @ldev: pointer to a client info
  93. */
  94. static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
  95. {
  96. struct i40iw_handler *hdl;
  97. unsigned long flags;
  98. spin_lock_irqsave(&i40iw_handler_lock, flags);
  99. list_for_each_entry(hdl, &i40iw_handlers, list) {
  100. if (hdl->ldev.netdev == ldev->netdev) {
  101. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  102. return hdl;
  103. }
  104. }
  105. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  106. return NULL;
  107. }
  108. /**
  109. * i40iw_find_netdev - find a handler given a netdev
  110. * @netdev: pointer to net_device
  111. */
  112. struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
  113. {
  114. struct i40iw_handler *hdl;
  115. unsigned long flags;
  116. spin_lock_irqsave(&i40iw_handler_lock, flags);
  117. list_for_each_entry(hdl, &i40iw_handlers, list) {
  118. if (hdl->ldev.netdev == netdev) {
  119. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  120. return hdl;
  121. }
  122. }
  123. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  124. return NULL;
  125. }
  126. /**
  127. * i40iw_add_handler - add a handler to the list
  128. * @hdl: handler to be added to the handler list
  129. */
  130. static void i40iw_add_handler(struct i40iw_handler *hdl)
  131. {
  132. unsigned long flags;
  133. spin_lock_irqsave(&i40iw_handler_lock, flags);
  134. list_add(&hdl->list, &i40iw_handlers);
  135. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  136. }
  137. /**
  138. * i40iw_del_handler - delete a handler from the list
  139. * @hdl: handler to be deleted from the handler list
  140. */
  141. static int i40iw_del_handler(struct i40iw_handler *hdl)
  142. {
  143. unsigned long flags;
  144. spin_lock_irqsave(&i40iw_handler_lock, flags);
  145. list_del(&hdl->list);
  146. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  147. return 0;
  148. }
/**
 * i40iw_enable_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 *
 * Re-arm one msix vector by writing the dynamic interrupt control
 * register (PF or VF variant depending on the function type).
 */
static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
{
	u32 val;

	/* enable the vector, clear the pending-bit array, ITR index 3 */
	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		(3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	/* msix_id - 1: register array appears to be 0-based while msix ids
	 * start at 1 — NOTE(review): confirm against iw_msixtbl setup.
	 */
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
}
/**
 * i40iw_dpc - tasklet for aeq and ceq 0
 * @data: iwarp device
 *
 * Bottom half for the first msix vector.  When the AEQ shares its
 * vector with CEQ 0 (msix_shared), drain CEQ 0 as well as the AEQ,
 * then re-arm the vector.
 */
static void i40iw_dpc(unsigned long data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	if (iwdev->msix_shared)
		i40iw_process_ceq(iwdev, iwdev->ceqlist);
	i40iw_process_aeq(iwdev);
	/* re-enable the first vector now that both queues are drained */
	i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
}
  177. /**
  178. * i40iw_ceq_dpc - dpc handler for CEQ
  179. * @data: data points to CEQ
  180. */
  181. static void i40iw_ceq_dpc(unsigned long data)
  182. {
  183. struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
  184. struct i40iw_device *iwdev = iwceq->iwdev;
  185. i40iw_process_ceq(iwdev, iwceq);
  186. i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
  187. }
  188. /**
  189. * i40iw_irq_handler - interrupt handler for aeq and ceq0
  190. * @irq: Interrupt request number
  191. * @data: iwarp device
  192. */
  193. static irqreturn_t i40iw_irq_handler(int irq, void *data)
  194. {
  195. struct i40iw_device *iwdev = (struct i40iw_device *)data;
  196. tasklet_schedule(&iwdev->dpc_tasklet);
  197. return IRQ_HANDLED;
  198. }
/**
 * i40iw_destroy_cqp - destroy control qp
 * @iwdev: iwarp device
 * @free_hwcqp: true if the HW cqp was created and must be destroyed
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;

	/* only tear down the HW cqp if it was actually created
	 * (i40iw_create_cqp calls this with false on early failure)
	 */
	if (free_hwcqp)
		dev->cqp_ops->cqp_destroy(dev->cqp);

	i40iw_cleanup_pending_cqp_op(iwdev);

	/* release SW resources: sq ring, scratch array, request pool */
	i40iw_free_dma_mem(dev->hw, &cqp->sq);
	kfree(cqp->scratch_array);
	iwdev->cqp.scratch_array = NULL;
	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}
/**
 * i40iw_disable_irq - disable device interrupts
 * @dev: hardware control device structure
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
			      struct i40iw_msix_vector *msix_vec,
			      void *dev_id)
{
	/* mask the vector by clearing its dynamic control register */
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	/* drop the affinity hint before releasing the irq */
	irq_set_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}
/**
 * i40iw_destroy_aeq - destroy aeq
 * @iwdev: iwarp device
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
	/* preset to NOT_READY so an error is logged unless the destroy
	 * request was both issued and completed successfully
	 */
	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;

	/* with a dedicated AEQ vector, release it here; in shared mode
	 * the vector is released together with CEQ 0 in i40iw_dele_ceqs
	 */
	if (!iwdev->msix_shared)
		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
	/* skip the destroy command when the HW is being reset */
	if (iwdev->reset)
		goto exit;
	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
		status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
	if (status)
		i40iw_pr_err("destroy aeq failed %d\n", status);
exit:
	i40iw_free_dma_mem(dev->hw, &aeq->mem);
}
/**
 * i40iw_destroy_ceq - destroy ceq
 * @iwdev: iwarp device
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
			      struct i40iw_ceq *iwceq)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	/* on reset, the HW command cannot be issued; just free memory */
	if (iwdev->reset)
		goto exit;

	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		i40iw_pr_err("ceq destroy command failed %d\n", status);
		goto exit;
	}
	/* wait for / check completion of the destroy command */
	status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		i40iw_pr_err("ceq destroy completion failed %d\n", status);
exit:
	/* DMA memory is freed regardless of command success */
	i40iw_free_dma_mem(dev->hw, &iwceq->mem);
}
/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

	/* CEQ 0 shares the first msix vector with the AEQ when
	 * msix_shared is set; its irq dev_id is the iwdev (matching how
	 * it was requested in i40iw_configure_ceq_vector)
	 */
	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq);
		iwceq++;
		i++;
	}
	/* remaining ceqs have dedicated vectors starting at table index 1,
	 * each requested with the ceq itself as dev_id
	 */
	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq);
	}

	iwdev->sc_dev.ceq_valid = false;
}
  314. /**
  315. * i40iw_destroy_ccq - destroy control cq
  316. * @iwdev: iwarp device
  317. *
  318. * Issue destroy ccq request and
  319. * free the resources associated with the ccq
  320. */
  321. static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
  322. {
  323. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  324. struct i40iw_ccq *ccq = &iwdev->ccq;
  325. enum i40iw_status_code status = 0;
  326. if (!iwdev->reset)
  327. status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
  328. if (status)
  329. i40iw_pr_err("ccq destroy failed %d\n", status);
  330. i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
  331. }
/* types of hmc objects — creation order for i40iw_create_hmc_objs;
 * teardown in i40iw_del_hmc_objects iterates this same table
 */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};
/**
 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info structure holding the object counts
 * @is_pf: true if the function is PF otherwise false
 * @reset: true if called before reset
 */
static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
					 enum i40iw_hmc_rsrc_type obj_type,
					 struct i40iw_hmc_info *hmc_info,
					 bool is_pf,
					 bool reset)
{
	struct i40iw_hmc_del_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	/* delete every object of this type that was accounted for */
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.is_pf = is_pf;
	if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
		i40iw_pr_err("del obj of type %d failed\n", obj_type);
}
  368. /**
  369. * i40iw_del_hmc_objects - remove all device hmc objects
  370. * @dev: iwarp device
  371. * @hmc_info: hmc_info to free
  372. * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
  373. * by PF on behalf of VF
  374. * @reset: true if called before reset
  375. */
  376. static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
  377. struct i40iw_hmc_info *hmc_info,
  378. bool is_pf,
  379. bool reset)
  380. {
  381. unsigned int i;
  382. for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
  383. i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
  384. }
/**
 * i40iw_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 *
 * Top half for a dedicated CEQ vector: sanity-check the irq number
 * and defer processing to the ceq's dpc tasklet.
 */
static irqreturn_t i40iw_ceq_handler(int irq, void *data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;

	/* diagnostic only — processing continues even on a mismatch */
	if (iwceq->irq != irq)
		i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);
	return IRQ_HANDLED;
}
/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 *
 * Thin wrapper around the device's create_hmc_object operation;
 * returns its status directly.
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}
/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: true if the function is PF otherwise false
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
						    bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	/* create each object type in iw_hmc_obj_types order */
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		info.add_sd_cnt = 0;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_pr_err("create obj type %d status = %d\n",
				     iw_hmc_obj_types[i], status);
			break;
		}
	}
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
								      dev->hmc_fn_id,
								      true, true));

	/* failure: unwind the object types created so far, newest first */
	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
					     iw_hmc_obj_types[i],
					     dev->hmc_info,
					     is_pf,
					     false);
	}
	return status;
}
/**
 * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
 * @iwdev: iwarp device
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory (alignment - 1; 0 means no alignment)
 *
 * Bump-allocates a region of @size bytes from the obj_mem carve-out,
 * advancing the obj_next cursor past it.
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
					     struct i40iw_dma_mem *memptr,
					     u32 size,
					     u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

	va = (unsigned long)iwdev->obj_next.va;
	newva = va;
	/* round the VA up to the next (mask + 1) boundary */
	if (mask)
		newva = ALIGN(va, (mask + 1));
	extra = newva - va;	/* padding consumed by the alignment */
	memptr->va = (u8 *)va + extra;
	/* pa mirrors va within the carve-out, so the same offset applies */
	memptr->pa = iwdev->obj_next.pa + extra;
	memptr->size = size;
	/* fail if the region would run past the backing allocation */
	if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
		return I40IW_ERR_NO_MEMORY;
	/* advance the cursor past the handed-out region */
	iwdev->obj_next.va = memptr->va + size;
	iwdev->obj_next.pa = memptr->pa + size;
	return 0;
}
/**
 * i40iw_create_cqp - create control qp
 * @iwdev: iwarp device
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
	struct i40iw_dma_mem mem;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp_init_info cqp_init_info;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	u16 maj_err, min_err;
	int i;

	/* one request and one scratch slot per SQ entry */
	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return I40IW_ERR_NO_MEMORY;
	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		kfree(cqp->cqp_requests);
		return I40IW_ERR_NO_MEMORY;
	}
	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	memset(&cqp_init_info, 0, sizeof(cqp_init_info));
	/* DMA memory for the CQP SQ ring */
	status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
					(sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
					I40IW_CQP_ALIGNMENT);
	if (status)
		goto exit;
	/* host context carved out of the pre-allocated object memory */
	status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
				       I40IW_HOST_CTX_ALIGNMENT_MASK);
	if (status)
		goto exit;
	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = iwdev->resource_profile;
	cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
	cqp_init_info.scratch_array = cqp->scratch_array;
	status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		i40iw_pr_err("cqp init status %d\n", status);
		goto exit;
	}
	status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
			     status, maj_err, min_err);
		goto exit;
	}
	spin_lock_init(&cqp->req_lock);
	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
	/* init the waitq of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	return 0;
exit:
	/* clean up the created resources; false = no HW cqp to destroy */
	i40iw_destroy_cqp(iwdev, false);
	return status;
}
/**
 * i40iw_create_ccq - create control cq
 * @iwdev: iwarp device
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dma_mem mem;
	enum i40iw_status_code status;
	struct i40iw_ccq_init_info info;
	struct i40iw_ccq *ccq = &iwdev->ccq;

	memset(&info, 0, sizeof(info));
	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	info.dev = dev;
	ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
	ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
	/* DMA memory for the CQE ring */
	status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
					ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
	if (status)
		goto exit;
	/* shadow area carved out of the pre-allocated object memory */
	status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
				       I40IW_SHADOWAREA_MASK);
	if (status)
		goto exit;
	ccq->sc_cq.back_cq = (void *)ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = mem.va;
	info.shadow_area_pa = mem.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	status = dev->ccq_ops->ccq_init(dev->ccq, &info);
	if (!status)
		status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
exit:
	/* the shadow area lives in obj_mem, so only the ring is freed */
	if (status)
		i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
	return status;
}
  604. /**
  605. * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
  606. * @iwdev: iwarp device
  607. * @msix_vec: interrupt vector information
  608. * @iwceq: ceq associated with the vector
  609. * @ceq_id: the id number of the iwceq
  610. *
  611. * Allocate interrupt resources and enable irq handling
  612. * Return 0 if successful, otherwise return error
  613. */
  614. static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
  615. struct i40iw_ceq *iwceq,
  616. u32 ceq_id,
  617. struct i40iw_msix_vector *msix_vec)
  618. {
  619. enum i40iw_status_code status;
  620. cpumask_t mask;
  621. if (iwdev->msix_shared && !ceq_id) {
  622. tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
  623. status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
  624. } else {
  625. tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
  626. status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
  627. }
  628. cpumask_clear(&mask);
  629. cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
  630. irq_set_affinity_hint(msix_vec->irq, &mask);
  631. if (status) {
  632. i40iw_pr_err("ceq irq config fail\n");
  633. return I40IW_ERR_CONFIG;
  634. }
  635. msix_vec->ceq_id = ceq_id;
  636. return 0;
  637. }
  638. /**
  639. * i40iw_create_ceq - create completion event queue
  640. * @iwdev: iwarp device
  641. * @iwceq: pointer to the ceq resources to be created
  642. * @ceq_id: the id number of the iwceq
  643. *
  644. * Return 0, if the ceq and the resources associated with it
  645. * are successfully created, otherwise return error
  646. */
  647. static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
  648. struct i40iw_ceq *iwceq,
  649. u32 ceq_id)
  650. {
  651. enum i40iw_status_code status;
  652. struct i40iw_ceq_init_info info;
  653. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  654. u64 scratch;
  655. memset(&info, 0, sizeof(info));
  656. info.ceq_id = ceq_id;
  657. iwceq->iwdev = iwdev;
  658. iwceq->mem.size = sizeof(struct i40iw_ceqe) *
  659. iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  660. status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
  661. I40IW_CEQ_ALIGNMENT);
  662. if (status)
  663. goto exit;
  664. info.ceq_id = ceq_id;
  665. info.ceqe_base = iwceq->mem.va;
  666. info.ceqe_pa = iwceq->mem.pa;
  667. info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  668. iwceq->sc_ceq.ceq_id = ceq_id;
  669. info.dev = dev;
  670. scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
  671. status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
  672. if (!status)
  673. status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
  674. exit:
  675. if (status)
  676. i40iw_free_dma_mem(dev->hw, &iwceq->mem);
  677. return status;
  678. }
/**
 * i40iw_request_reset - ask the lan driver to reset this function
 * @iwdev: iwarp device
 *
 * Forwards a reset request to the i40e lan driver via its client ops
 * (final argument 1 - presumably the reset level; confirm against the
 * i40e client interface).
 */
void i40iw_request_reset(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	ldev->ops->request_reset(ldev, iwdev->client, 1);
}
/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	/* hand the qv list (built in i40iw_save_msix_info) to the lan driver */
	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}

	/* 'i' walks the msix table, 'ceq_id' walks the ceq list.  With shared
	 * msix, vector 0 also serves ceq 0; otherwise vector 0 is aeq-only and
	 * the ceqs start at vector 1 (so one fewer ceq fits under num_ceqs).
	 */
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}
		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}
exit:
	/* partial success is accepted: keep whatever ceqs came up */
	if (status && !iwdev->ceqs_count) {
		kfree(iwdev->ceqlist);
		iwdev->ceqlist = NULL;
		return status;
	} else {
		iwdev->sc_dev.ceq_valid = true;
		return 0;
	}
}
  746. /**
  747. * i40iw_configure_aeq_vector - set up the msix vector for aeq
  748. * @iwdev: iwarp device
  749. *
  750. * Allocate interrupt resources and enable irq handling
  751. * Return 0 if successful, otherwise return error
  752. */
  753. static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
  754. {
  755. struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
  756. u32 ret = 0;
  757. if (!iwdev->msix_shared) {
  758. tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
  759. ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
  760. }
  761. if (ret) {
  762. i40iw_pr_err("aeq irq config fail\n");
  763. return I40IW_ERR_CONFIG;
  764. }
  765. return 0;
  766. }
  767. /**
  768. * i40iw_create_aeq - create async event queue
  769. * @iwdev: iwarp device
  770. *
  771. * Return 0, if the aeq and the resources associated with it
  772. * are successfully created, otherwise return error
  773. */
  774. static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
  775. {
  776. enum i40iw_status_code status;
  777. struct i40iw_aeq_init_info info;
  778. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  779. struct i40iw_aeq *aeq = &iwdev->aeq;
  780. u64 scratch = 0;
  781. u32 aeq_size;
  782. aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
  783. iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  784. memset(&info, 0, sizeof(info));
  785. aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
  786. status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
  787. I40IW_AEQ_ALIGNMENT);
  788. if (status)
  789. goto exit;
  790. info.aeqe_base = aeq->mem.va;
  791. info.aeq_elem_pa = aeq->mem.pa;
  792. info.elem_cnt = aeq_size;
  793. info.dev = dev;
  794. status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
  795. if (status)
  796. goto exit;
  797. status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
  798. if (!status)
  799. status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
  800. exit:
  801. if (status)
  802. i40iw_free_dma_mem(dev->hw, &aeq->mem);
  803. return status;
  804. }
  805. /**
  806. * i40iw_setup_aeq - set up the device aeq
  807. * @iwdev: iwarp device
  808. *
  809. * Create the aeq and configure its msix interrupt vector
  810. * Return 0 if successful, otherwise return error
  811. */
  812. static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
  813. {
  814. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  815. enum i40iw_status_code status;
  816. status = i40iw_create_aeq(iwdev);
  817. if (status)
  818. return status;
  819. status = i40iw_configure_aeq_vector(iwdev);
  820. if (status) {
  821. i40iw_destroy_aeq(iwdev);
  822. return status;
  823. }
  824. if (!iwdev->msix_shared)
  825. i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
  826. return 0;
  827. }
  828. /**
  829. * i40iw_initialize_ilq - create iwarp local queue for cm
  830. * @iwdev: iwarp device
  831. *
  832. * Return 0 if successful, otherwise return error
  833. */
  834. static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
  835. {
  836. struct i40iw_puda_rsrc_info info;
  837. enum i40iw_status_code status;
  838. memset(&info, 0, sizeof(info));
  839. info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
  840. info.cq_id = 1;
  841. info.qp_id = 0;
  842. info.count = 1;
  843. info.pd_id = 1;
  844. info.sq_size = 8192;
  845. info.rq_size = 8192;
  846. info.buf_size = 1024;
  847. info.tx_buf_cnt = 16384;
  848. info.receive = i40iw_receive_ilq;
  849. info.xmit_complete = i40iw_free_sqbuf;
  850. status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
  851. if (status)
  852. i40iw_pr_err("ilq create fail\n");
  853. return status;
  854. }
  855. /**
  856. * i40iw_initialize_ieq - create iwarp exception queue
  857. * @iwdev: iwarp device
  858. *
  859. * Return 0 if successful, otherwise return error
  860. */
  861. static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
  862. {
  863. struct i40iw_puda_rsrc_info info;
  864. enum i40iw_status_code status;
  865. memset(&info, 0, sizeof(info));
  866. info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
  867. info.cq_id = 2;
  868. info.qp_id = iwdev->vsi.exception_lan_queue;
  869. info.count = 1;
  870. info.pd_id = 2;
  871. info.sq_size = 8192;
  872. info.rq_size = 8192;
  873. info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
  874. info.tx_buf_cnt = 4096;
  875. status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
  876. if (status)
  877. i40iw_pr_err("ieq create fail\n");
  878. return status;
  879. }
/**
 * i40iw_reinitialize_ieq - destroy and re-create ieq
 * @dev: iwarp device
 *
 * Tear down the exception queue resources and build a fresh ieq.
 * If re-creation fails, escalate by requesting a function reset
 * from the lan driver.
 */
void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
	if (i40iw_initialize_ieq(iwdev)) {
		/* could not rebuild the ieq - fall back to a device reset */
		iwdev->reset = true;
		i40iw_request_reset(iwdev);
	}
}
  893. /**
  894. * i40iw_hmc_setup - create hmc objects for the device
  895. * @iwdev: iwarp device
  896. *
  897. * Set up the device private memory space for the number and size of
  898. * the hmc objects and create the objects
  899. * Return 0 if successful, otherwise return error
  900. */
  901. static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
  902. {
  903. enum i40iw_status_code status;
  904. iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
  905. status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
  906. if (status)
  907. goto exit;
  908. status = i40iw_create_hmc_objs(iwdev, true);
  909. if (status)
  910. goto exit;
  911. iwdev->init_state = HMC_OBJS_CREATED;
  912. exit:
  913. return status;
  914. }
/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 *
 * Free everything allocated during early init: the object DMA area,
 * the hmc sd table, mem_resources, the ceq list, the msix table and
 * the hmc info area.  Pointers are NULLed so a repeated teardown pass
 * is harmless.
 */
static void i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}
  934. /**
  935. * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
  936. * @iwdev: iwarp device
  937. * @idx: the index of the mac ip address to delete
  938. */
  939. static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
  940. {
  941. struct i40iw_cqp *iwcqp = &iwdev->cqp;
  942. struct i40iw_cqp_request *cqp_request;
  943. struct cqp_commands_info *cqp_info;
  944. enum i40iw_status_code status = 0;
  945. cqp_request = i40iw_get_cqp_request(iwcqp, true);
  946. if (!cqp_request) {
  947. i40iw_pr_err("cqp_request memory failed\n");
  948. return;
  949. }
  950. cqp_info = &cqp_request->info;
  951. cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
  952. cqp_info->post_sq = 1;
  953. cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
  954. cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  955. cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
  956. cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
  957. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  958. if (status)
  959. i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
  960. }
  961. /**
  962. * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
  963. * @iwdev: iwarp device
  964. * @mac_addr: pointer to mac address
  965. * @idx: the index of the mac ip address to add
  966. */
  967. static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
  968. u8 *mac_addr,
  969. u8 idx)
  970. {
  971. struct i40iw_local_mac_ipaddr_entry_info *info;
  972. struct i40iw_cqp *iwcqp = &iwdev->cqp;
  973. struct i40iw_cqp_request *cqp_request;
  974. struct cqp_commands_info *cqp_info;
  975. enum i40iw_status_code status = 0;
  976. cqp_request = i40iw_get_cqp_request(iwcqp, true);
  977. if (!cqp_request) {
  978. i40iw_pr_err("cqp_request memory failed\n");
  979. return I40IW_ERR_NO_MEMORY;
  980. }
  981. cqp_info = &cqp_request->info;
  982. cqp_info->post_sq = 1;
  983. info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
  984. ether_addr_copy(info->mac_addr, mac_addr);
  985. info->entry_idx = idx;
  986. cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  987. cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
  988. cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
  989. cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  990. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  991. if (status)
  992. i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
  993. return status;
  994. }
/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	/* increment refcount, because we need the cqp request ret value:
	 * the extra reference keeps the request alive past completion so
	 * compl_info.op_ret_val can be read below.
	 */
	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		/* hw returns the allocated table slot in op_ret_val */
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}
  1032. /**
  1033. * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
  1034. * @iwdev: iwarp device
  1035. * @macaddr: pointer to mac address
  1036. *
  1037. * Allocate a mac ip address entry and add it to the hw table
  1038. * Return 0 if successful, otherwise return error
  1039. */
  1040. static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
  1041. u8 *macaddr)
  1042. {
  1043. enum i40iw_status_code status;
  1044. status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
  1045. if (!status) {
  1046. status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
  1047. (u8)iwdev->mac_ip_table_idx);
  1048. if (status)
  1049. i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
  1050. }
  1051. return status;
  1052. }
/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 *
 * Walk all netdevs under rcu and, for this function's netdev (or any
 * vlan stacked on it) that is up, push each of its ipv6 addresses into
 * the hw arp cache.
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		/* match the iwarp netdev itself or a vlan device on top of it */
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			/* NOTE(review): addr_list is normally protected by
			 * idev->lock; walking it under rcu_read_lock alone
			 * may race with address removal - confirm.
			 */
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				/* convert out of network byte order for the hw */
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}
  1088. /**
  1089. * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
  1090. * @iwdev: iwarp device
  1091. */
  1092. static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
  1093. {
  1094. struct net_device *dev;
  1095. struct in_device *idev;
  1096. bool got_lock = true;
  1097. u32 ip_addr;
  1098. if (!rtnl_trylock())
  1099. got_lock = false;
  1100. for_each_netdev(&init_net, dev) {
  1101. if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
  1102. (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
  1103. (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
  1104. idev = in_dev_get(dev);
  1105. for_ifa(idev) {
  1106. i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
  1107. "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
  1108. rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
  1109. ip_addr = ntohl(ifa->ifa_address);
  1110. i40iw_manage_arp_cache(iwdev,
  1111. dev->dev_addr,
  1112. &ip_addr,
  1113. true,
  1114. I40IW_ARP_ADD);
  1115. }
  1116. endfor_ifa(idev);
  1117. in_dev_put(idev);
  1118. }
  1119. }
  1120. if (got_lock)
  1121. rtnl_unlock();
  1122. }
  1123. /**
  1124. * i40iw_add_mac_ip - add mac and ip addresses
  1125. * @iwdev: iwarp device
  1126. *
  1127. * Create and add a mac ip address entry to the hw table and
  1128. * ipv4/ipv6 addresses to the arp cache
  1129. * Return 0 if successful, otherwise return error
  1130. */
  1131. static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
  1132. {
  1133. struct net_device *netdev = iwdev->netdev;
  1134. enum i40iw_status_code status;
  1135. status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
  1136. if (status)
  1137. return status;
  1138. i40iw_add_ipv4_addr(iwdev);
  1139. i40iw_add_ipv6_addr(iwdev);
  1140. return 0;
  1141. }
/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 *
 * Poll the fw load status and the three PE cpu status registers until
 * every cpu reports 0x80 (up to 14 tries, one second apart), then issue
 * a final register write.
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		/* 0x80 on all three cpus signals ready */
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break; /* SUCCESS */
		msleep(1000);
		retrycount++;
	} while (retrycount < 14);
	/* NOTE(review): register 0xb4040 / value 0x4C104C5 are undocumented
	 * magic here - confirm their meaning against hw documentation before
	 * changing this write.
	 */
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}
  1171. /**
  1172. * i40iw_initialize_dev - initialize device
  1173. * @iwdev: iwarp device
  1174. * @ldev: lan device information
  1175. *
  1176. * Allocate memory for the hmc objects and initialize iwdev
  1177. * Return 0 if successful, otherwise clean up the resources
  1178. * and return error
  1179. */
  1180. static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
  1181. struct i40e_info *ldev)
  1182. {
  1183. enum i40iw_status_code status;
  1184. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  1185. struct i40iw_device_init_info info;
  1186. struct i40iw_vsi_init_info vsi_info;
  1187. struct i40iw_dma_mem mem;
  1188. struct i40iw_l2params l2params;
  1189. u32 size;
  1190. struct i40iw_vsi_stats_info stats_info;
  1191. u16 last_qset = I40IW_NO_QSET;
  1192. u16 qset;
  1193. u32 i;
  1194. memset(&l2params, 0, sizeof(l2params));
  1195. memset(&info, 0, sizeof(info));
  1196. size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
  1197. (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
  1198. iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
  1199. if (!iwdev->hmc_info_mem)
  1200. return I40IW_ERR_NO_MEMORY;
  1201. iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
  1202. dev->hmc_info = &iwdev->hw.hmc;
  1203. dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
  1204. status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
  1205. I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
  1206. if (status)
  1207. goto error;
  1208. info.fpm_query_buf_pa = mem.pa;
  1209. info.fpm_query_buf = mem.va;
  1210. status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
  1211. I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
  1212. if (status)
  1213. goto error;
  1214. info.fpm_commit_buf_pa = mem.pa;
  1215. info.fpm_commit_buf = mem.va;
  1216. info.hmc_fn_id = ldev->fid;
  1217. info.is_pf = (ldev->ftype) ? false : true;
  1218. info.bar0 = ldev->hw_addr;
  1219. info.hw = &iwdev->hw;
  1220. info.debug_mask = debug;
  1221. l2params.mtu =
  1222. (ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;
  1223. for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
  1224. qset = ldev->params.qos.prio_qos[i].qs_handle;
  1225. l2params.qs_handle_list[i] = qset;
  1226. if (last_qset == I40IW_NO_QSET)
  1227. last_qset = qset;
  1228. else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
  1229. iwdev->dcb = true;
  1230. }
  1231. i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
  1232. info.vchnl_send = i40iw_virtchnl_send;
  1233. status = i40iw_device_init(&iwdev->sc_dev, &info);
  1234. if (status)
  1235. goto error;
  1236. memset(&vsi_info, 0, sizeof(vsi_info));
  1237. vsi_info.dev = &iwdev->sc_dev;
  1238. vsi_info.back_vsi = (void *)iwdev;
  1239. vsi_info.params = &l2params;
  1240. vsi_info.exception_lan_queue = 1;
  1241. i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
  1242. if (dev->is_pf) {
  1243. memset(&stats_info, 0, sizeof(stats_info));
  1244. stats_info.fcn_id = ldev->fid;
  1245. stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
  1246. if (!stats_info.pestat) {
  1247. status = I40IW_ERR_NO_MEMORY;
  1248. goto error;
  1249. }
  1250. stats_info.stats_initialize = true;
  1251. if (stats_info.pestat)
  1252. i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
  1253. }
  1254. return status;
  1255. error:
  1256. kfree(iwdev->hmc_info_mem);
  1257. iwdev->hmc_info_mem = NULL;
  1258. return status;
  1259. }
/**
 * i40iw_register_notifiers - register tcp ip notifiers
 *
 * Hook inet and inet6 address changes, netevents and netdevice
 * events into the driver's notifier blocks.
 */
static void i40iw_register_notifiers(void)
{
	register_inetaddr_notifier(&i40iw_inetaddr_notifier);
	register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	register_netevent_notifier(&i40iw_net_notifier);
	register_netdevice_notifier(&i40iw_netdevice_notifier);
}
/**
 * i40iw_unregister_notifiers - unregister tcp ip notifiers
 *
 * Counterpart of i40iw_register_notifiers(); removes the netevent,
 * inet, inet6 and netdevice notifier blocks.
 */
static void i40iw_unregister_notifiers(void)
{
	unregister_netevent_notifier(&i40iw_net_notifier);
	unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
	unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	unregister_netdevice_notifier(&i40iw_netdevice_notifier);
}
  1280. /**
  1281. * i40iw_save_msix_info - copy msix vector information to iwarp device
  1282. * @iwdev: iwarp device
  1283. * @ldev: lan device information
  1284. *
  1285. * Allocate iwdev msix table and copy the ldev msix info to the table
  1286. * Return 0 if successful, otherwise return error
  1287. */
  1288. static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
  1289. struct i40e_info *ldev)
  1290. {
  1291. struct i40e_qvlist_info *iw_qvlist;
  1292. struct i40e_qv_info *iw_qvinfo;
  1293. u32 ceq_idx;
  1294. u32 i;
  1295. u32 size;
  1296. if (!ldev->msix_count) {
  1297. i40iw_pr_err("No MSI-X vectors\n");
  1298. return I40IW_ERR_CONFIG;
  1299. }
  1300. iwdev->msix_count = ldev->msix_count;
  1301. size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
  1302. size += sizeof(struct i40e_qvlist_info);
  1303. size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;
  1304. iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
  1305. if (!iwdev->iw_msixtbl)
  1306. return I40IW_ERR_NO_MEMORY;
  1307. iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
  1308. iw_qvlist = iwdev->iw_qvlist;
  1309. iw_qvinfo = iw_qvlist->qv_info;
  1310. iw_qvlist->num_vectors = iwdev->msix_count;
  1311. if (iwdev->msix_count <= num_online_cpus())
  1312. iwdev->msix_shared = true;
  1313. for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
  1314. iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
  1315. iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
  1316. iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
  1317. if (i == 0) {
  1318. iw_qvinfo->aeq_idx = 0;
  1319. if (iwdev->msix_shared)
  1320. iw_qvinfo->ceq_idx = ceq_idx++;
  1321. else
  1322. iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
  1323. } else {
  1324. iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
  1325. iw_qvinfo->ceq_idx = ceq_idx++;
  1326. }
  1327. iw_qvinfo->itr_idx = 3;
  1328. iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
  1329. }
  1330. return 0;
  1331. }
/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects.
 *
 * The switch unwinds in reverse creation order: each case falls through
 * to tear down everything that was created before the recorded
 * init_state.
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);
	if (iwdev->param_wq)
		destroy_workqueue(iwdev->param_wq);

	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		/* report port event after clearing iw_status */
		i40iw_port_ibevent(iwdev);
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		/* fallthrough */
	case IP_ADDR_REGISTERED:
		/* skip the hw table update when the function is resetting */
		if (!iwdev->reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		/* fallthrough */
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		/* fallthrough */
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev);
		/* fallthrough */
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev);
		/* fallthrough */
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, true);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (iwdev->vsi.pestat) {
			i40iw_vsi_stats_free(&iwdev->vsi);
			kfree(iwdev->vsi.pestat);
		}
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
		/* fallthrough */
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}
/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	/* keep a private copy of the lan device info in the handler */
	memcpy(&hdl->ldev, ldev, sizeof(*ldev));

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
	    (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
	    I40IW_HMC_PROFILE_DEFAULT;
	/* vfs are only allowed for non-default resource profiles */
	iwdev->max_rdma_vfs =
		(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
	/* doorbell offset differs between pf (!ftype) and vf */
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		return status;
	iwdev->hw.dev_context = (void *)ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;
	iwdev->push_mode = push_mode;

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);
	init_waitqueue_head(&iwdev->close_wq);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	/* any failure after msix save releases the table and obj_mem */
	if (status) {
		kfree(iwdev->iw_msixtbl);
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}
/**
 * i40iw_get_used_rsrc - determine resources used internally
 * @iwdev: iwarp device
 *
 * Called after internal allocations.  Each counter is set to the first
 * clear bit in the corresponding allocation bitmap - i.e. the count of
 * ids handed out so far, assuming ids were allocated contiguously from
 * 0 at this point in init (find_next_zero_bit only measures the leading
 * run of set bits - TODO confirm).
 */
static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}
  1465. /**
  1466. * i40iw_open - client interface operation open for iwarp/uda device
  1467. * @ldev: lan device information
  1468. * @client: iwarp client information, provided during registration
  1469. *
  1470. * Called by the lan driver during the processing of client register
  1471. * Create device resources, set up queues, pble and hmc objects and
  1472. * register the device with the ib verbs interface
  1473. * Return 0 if successful, otherwise return error
  1474. */
  1475. static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
  1476. {
  1477. struct i40iw_device *iwdev;
  1478. struct i40iw_sc_dev *dev;
  1479. enum i40iw_status_code status;
  1480. struct i40iw_handler *hdl;
  1481. hdl = i40iw_find_netdev(ldev->netdev);
  1482. if (hdl)
  1483. return 0;
  1484. hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
  1485. if (!hdl)
  1486. return -ENOMEM;
  1487. iwdev = &hdl->device;
  1488. iwdev->hdl = hdl;
  1489. dev = &iwdev->sc_dev;
  1490. i40iw_setup_cm_core(iwdev);
  1491. dev->back_dev = (void *)iwdev;
  1492. iwdev->ldev = &hdl->ldev;
  1493. iwdev->client = client;
  1494. mutex_init(&iwdev->pbl_mutex);
  1495. i40iw_add_handler(hdl);
  1496. do {
  1497. status = i40iw_setup_init_state(hdl, ldev, client);
  1498. if (status)
  1499. break;
  1500. iwdev->init_state = INITIAL_STATE;
  1501. if (dev->is_pf)
  1502. i40iw_wait_pe_ready(dev->hw);
  1503. status = i40iw_create_cqp(iwdev);
  1504. if (status)
  1505. break;
  1506. iwdev->init_state = CQP_CREATED;
  1507. status = i40iw_hmc_setup(iwdev);
  1508. if (status)
  1509. break;
  1510. status = i40iw_create_ccq(iwdev);
  1511. if (status)
  1512. break;
  1513. iwdev->init_state = CCQ_CREATED;
  1514. status = i40iw_initialize_ilq(iwdev);
  1515. if (status)
  1516. break;
  1517. iwdev->init_state = ILQ_CREATED;
  1518. status = i40iw_initialize_ieq(iwdev);
  1519. if (status)
  1520. break;
  1521. iwdev->init_state = IEQ_CREATED;
  1522. status = i40iw_setup_aeq(iwdev);
  1523. if (status)
  1524. break;
  1525. iwdev->init_state = AEQ_CREATED;
  1526. status = i40iw_setup_ceqs(iwdev, ldev);
  1527. if (status)
  1528. break;
  1529. iwdev->init_state = CEQ_CREATED;
  1530. status = i40iw_initialize_hw_resources(iwdev);
  1531. if (status)
  1532. break;
  1533. i40iw_get_used_rsrc(iwdev);
  1534. dev->ccq_ops->ccq_arm(dev->ccq);
  1535. status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
  1536. if (status)
  1537. break;
  1538. iwdev->init_state = PBLE_CHUNK_MEM;
  1539. iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
  1540. status = i40iw_add_mac_ip(iwdev);
  1541. if (status)
  1542. break;
  1543. iwdev->init_state = IP_ADDR_REGISTERED;
  1544. if (i40iw_register_rdma_device(iwdev)) {
  1545. i40iw_pr_err("register rdma device fail\n");
  1546. break;
  1547. };
  1548. iwdev->init_state = RDMA_DEV_REGISTERED;
  1549. iwdev->iw_status = 1;
  1550. i40iw_port_ibevent(iwdev);
  1551. iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
  1552. if(iwdev->param_wq == NULL)
  1553. break;
  1554. i40iw_pr_info("i40iw_open completed\n");
  1555. return 0;
  1556. } while (0);
  1557. i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
  1558. i40iw_deinit_device(iwdev);
  1559. return -ERESTART;
  1560. }
  1561. /**
  1562. * i40iw_l2params_worker - worker for l2 params change
  1563. * @work: work pointer for l2 params
  1564. */
  1565. static void i40iw_l2params_worker(struct work_struct *work)
  1566. {
  1567. struct l2params_work *dwork =
  1568. container_of(work, struct l2params_work, work);
  1569. struct i40iw_device *iwdev = dwork->iwdev;
  1570. i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
  1571. atomic_dec(&iwdev->params_busy);
  1572. kfree(work);
  1573. }
  1574. /**
  1575. * i40iw_l2param_change - handle qs handles for qos and mss change
  1576. * @ldev: lan device information
  1577. * @client: client for paramater change
  1578. * @params: new parameters from L2
  1579. */
  1580. static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
  1581. struct i40e_params *params)
  1582. {
  1583. struct i40iw_handler *hdl;
  1584. struct i40iw_l2params *l2params;
  1585. struct l2params_work *work;
  1586. struct i40iw_device *iwdev;
  1587. int i;
  1588. hdl = i40iw_find_i40e_handler(ldev);
  1589. if (!hdl)
  1590. return;
  1591. iwdev = &hdl->device;
  1592. if (atomic_read(&iwdev->params_busy))
  1593. return;
  1594. work = kzalloc(sizeof(*work), GFP_ATOMIC);
  1595. if (!work)
  1596. return;
  1597. atomic_inc(&iwdev->params_busy);
  1598. work->iwdev = iwdev;
  1599. l2params = &work->l2params;
  1600. for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
  1601. l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
  1602. l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu;
  1603. INIT_WORK(&work->work, i40iw_l2params_worker);
  1604. queue_work(iwdev->param_wq, &work->work);
  1605. }
  1606. /**
  1607. * i40iw_close - client interface operation close for iwarp/uda device
  1608. * @ldev: lan device information
  1609. * @client: client to close
  1610. *
  1611. * Called by the lan driver during the processing of client unregister
  1612. * Destroy and clean up the driver resources
  1613. */
  1614. static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
  1615. {
  1616. struct i40iw_device *iwdev;
  1617. struct i40iw_handler *hdl;
  1618. hdl = i40iw_find_i40e_handler(ldev);
  1619. if (!hdl)
  1620. return;
  1621. iwdev = &hdl->device;
  1622. iwdev->closing = true;
  1623. if (reset)
  1624. iwdev->reset = true;
  1625. i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
  1626. destroy_workqueue(iwdev->virtchnl_wq);
  1627. i40iw_deinit_device(iwdev);
  1628. }
/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF
 * Destroy and clean up the VF resources
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	iwdev = (struct i40iw_device *)dev->back_dev;

	/* Scan every PE-enabled VF slot for the entry matching vf_id */
	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
		/* Clear the slot under the pestat lock so concurrent stats
		 * processing never sees a half-torn-down VF entry.
		 */
		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev - size covers the vfdev plus its trailing HMC
		 * object info array, matching the original allocation
		 */
		vf_dev_mem.va = tmp_vfdev;
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
		    sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}
  1676. /**
  1677. * i40iw_vf_enable - enable a number of VFs
  1678. * @ldev: lan device information
  1679. * @client: client interface instance
  1680. * @num_vfs: number of VFs for the PF
  1681. *
  1682. * Called when the number of VFs changes
  1683. */
  1684. static void i40iw_vf_enable(struct i40e_info *ldev,
  1685. struct i40e_client *client,
  1686. u32 num_vfs)
  1687. {
  1688. struct i40iw_handler *hdl;
  1689. hdl = i40iw_find_i40e_handler(ldev);
  1690. if (!hdl)
  1691. return;
  1692. if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
  1693. hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
  1694. else
  1695. hdl->device.max_enabled_vfs = num_vfs;
  1696. }
  1697. /**
  1698. * i40iw_vf_capable - check if VF capable
  1699. * @ldev: lan device information
  1700. * @client: client interface instance
  1701. * @vf_id: virtual function id
  1702. *
  1703. * Return 1 if a VF slot is available or if VF is already RDMA enabled
  1704. * Return 0 otherwise
  1705. */
  1706. static int i40iw_vf_capable(struct i40e_info *ldev,
  1707. struct i40e_client *client,
  1708. u32 vf_id)
  1709. {
  1710. struct i40iw_handler *hdl;
  1711. struct i40iw_sc_dev *dev;
  1712. unsigned int i;
  1713. hdl = i40iw_find_i40e_handler(ldev);
  1714. if (!hdl)
  1715. return 0;
  1716. dev = &hdl->device.sc_dev;
  1717. for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
  1718. if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
  1719. return 1;
  1720. }
  1721. return 0;
  1722. }
  1723. /**
  1724. * i40iw_virtchnl_receive - receive a message through the virtual channel
  1725. * @ldev: lan device information
  1726. * @client: client interface instance
  1727. * @vf_id: virtual function id associated with the message
  1728. * @msg: message buffer pointer
  1729. * @len: length of the message
  1730. *
  1731. * Invoke virtual channel receive operation for the given msg
  1732. * Return 0 if successful, otherwise return error
  1733. */
  1734. static int i40iw_virtchnl_receive(struct i40e_info *ldev,
  1735. struct i40e_client *client,
  1736. u32 vf_id,
  1737. u8 *msg,
  1738. u16 len)
  1739. {
  1740. struct i40iw_handler *hdl;
  1741. struct i40iw_sc_dev *dev;
  1742. struct i40iw_device *iwdev;
  1743. int ret_code = I40IW_NOT_SUPPORTED;
  1744. if (!len || !msg)
  1745. return I40IW_ERR_PARAM;
  1746. hdl = i40iw_find_i40e_handler(ldev);
  1747. if (!hdl)
  1748. return I40IW_ERR_PARAM;
  1749. dev = &hdl->device.sc_dev;
  1750. iwdev = dev->back_dev;
  1751. if (dev->vchnl_if.vchnl_recv) {
  1752. ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
  1753. if (!dev->is_pf) {
  1754. atomic_dec(&iwdev->vchnl_msgs);
  1755. wake_up(&iwdev->vchnl_waitq);
  1756. }
  1757. }
  1758. return ret_code;
  1759. }
/**
 * i40iw_vf_clear_to_send - wait to send virtual channel message
 * @dev: iwarp device *
 * Wait for until virtual channel is clear
 * before sending the next message
 *
 * Returns false if error
 * Returns true if clear to send
 */
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev;
	wait_queue_entry_t wait;

	iwdev = dev->back_dev;

	/* Fast path: no one else is queued waiting and no virtual channel
	 * message is outstanding.
	 */
	if (!wq_has_sleeper(&dev->vf_reqs) &&
	    (atomic_read(&iwdev->vchnl_msgs) == 0))
		return true; /* virtual channel is clear */

	/* Queue exclusively so waiters are woken one at a time, keeping
	 * channel sends serialized.
	 */
	init_wait(&wait);
	add_wait_queue_exclusive(&dev->vf_reqs, &wait);

	/* Timing out means the outstanding message count never drained;
	 * mark the channel down so subsequent sends fail fast.
	 */
	if (!wait_event_timeout(dev->vf_reqs,
				(atomic_read(&iwdev->vchnl_msgs) == 0),
				I40IW_VCHNL_EVENT_TIMEOUT))
		dev->vchnl_up = false;

	remove_wait_queue(&dev->vf_reqs, &wait);

	return dev->vchnl_up;
}
  1786. /**
  1787. * i40iw_virtchnl_send - send a message through the virtual channel
  1788. * @dev: iwarp device
  1789. * @vf_id: virtual function id associated with the message
  1790. * @msg: virtual channel message buffer pointer
  1791. * @len: length of the message
  1792. *
  1793. * Invoke virtual channel send operation for the given msg
  1794. * Return 0 if successful, otherwise return error
  1795. */
  1796. static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
  1797. u32 vf_id,
  1798. u8 *msg,
  1799. u16 len)
  1800. {
  1801. struct i40iw_device *iwdev;
  1802. struct i40e_info *ldev;
  1803. if (!dev || !dev->back_dev)
  1804. return I40IW_ERR_BAD_PTR;
  1805. iwdev = dev->back_dev;
  1806. ldev = iwdev->ldev;
  1807. if (ldev && ldev->ops && ldev->ops->virtchnl_send)
  1808. return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
  1809. return I40IW_ERR_BAD_PTR;
  1810. }
/* client interface functions */
/* Callback table handed to the i40e LAN driver via i40e_register_client();
 * the LAN driver invokes these during client register/unregister, L2
 * parameter changes, virtual channel traffic and VF lifecycle events.
 */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
	.close = i40iw_close,
	.l2_param_change = i40iw_l2param_change,
	.virtchnl_receive = i40iw_virtchnl_receive,
	.vf_reset = i40iw_vf_reset,
	.vf_enable = i40iw_vf_enable,
	.vf_capable = i40iw_vf_capable
};
  1821. /**
  1822. * i40iw_init_module - driver initialization function
  1823. *
  1824. * First function to call when the driver is loaded
  1825. * Register the driver as i40e client and port mapper client
  1826. */
  1827. static int __init i40iw_init_module(void)
  1828. {
  1829. int ret;
  1830. memset(&i40iw_client, 0, sizeof(i40iw_client));
  1831. i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
  1832. i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
  1833. i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
  1834. i40iw_client.ops = &i40e_ops;
  1835. memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
  1836. i40iw_client.type = I40E_CLIENT_IWARP;
  1837. spin_lock_init(&i40iw_handler_lock);
  1838. ret = i40e_register_client(&i40iw_client);
  1839. i40iw_register_notifiers();
  1840. return ret;
  1841. }
/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	/* Tear down in the reverse order of i40iw_init_module() */
	i40iw_unregister_notifiers();
	i40e_unregister_client(&i40iw_client);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);