i40iw_main.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043
  1. /*******************************************************************************
  2. *
  3. * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenFabrics.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. *
  33. *******************************************************************************/
  34. #include <linux/module.h>
  35. #include <linux/moduleparam.h>
  36. #include <linux/netdevice.h>
  37. #include <linux/etherdevice.h>
  38. #include <linux/ip.h>
  39. #include <linux/tcp.h>
  40. #include <linux/if_vlan.h>
  41. #include <net/addrconf.h>
  42. #include "i40iw.h"
  43. #include "i40iw_register.h"
  44. #include <net/netevent.h>
/* interface version advertised to the i40e LAN driver */
#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
#define CLIENT_IW_INTERFACE_VERSION_BUILD 00
/* driver version string components */
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 5
#define DRV_VERSION_BUILD 123
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)

/* module parameters (all writable at runtime via sysfs, mode 0644) */
static int push_mode;
module_param(push_mode, int, 0644);
MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");

static int resource_profile;
module_param(resource_profile, int, 0644);
MODULE_PARM_DESC(resource_profile,
		 "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");

static int max_rdma_vfs = 32;
module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default");

static int mpa_version = 2;
module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");

MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

/* client object registered with the i40e LAN driver */
static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";

/* list of per-port handlers, guarded by i40iw_handler_lock */
/* NOTE(review): i40iw_handler_lock must be spin_lock_init()'d during
 * module init — the init site is not visible in this chunk; confirm.
 */
static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;

static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id, u8 *msg, u16 len);

/* notifier blocks for IPv4/IPv6 address changes and neighbor events */
static struct notifier_block i40iw_inetaddr_notifier = {
	.notifier_call = i40iw_inetaddr_event
};

static struct notifier_block i40iw_inetaddr6_notifier = {
	.notifier_call = i40iw_inet6addr_event
};

static struct notifier_block i40iw_net_notifier = {
	.notifier_call = i40iw_net_event
};
  87. /**
  88. * i40iw_find_i40e_handler - find a handler given a client info
  89. * @ldev: pointer to a client info
  90. */
  91. static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
  92. {
  93. struct i40iw_handler *hdl;
  94. unsigned long flags;
  95. spin_lock_irqsave(&i40iw_handler_lock, flags);
  96. list_for_each_entry(hdl, &i40iw_handlers, list) {
  97. if (hdl->ldev.netdev == ldev->netdev) {
  98. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  99. return hdl;
  100. }
  101. }
  102. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  103. return NULL;
  104. }
  105. /**
  106. * i40iw_find_netdev - find a handler given a netdev
  107. * @netdev: pointer to net_device
  108. */
  109. struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
  110. {
  111. struct i40iw_handler *hdl;
  112. unsigned long flags;
  113. spin_lock_irqsave(&i40iw_handler_lock, flags);
  114. list_for_each_entry(hdl, &i40iw_handlers, list) {
  115. if (hdl->ldev.netdev == netdev) {
  116. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  117. return hdl;
  118. }
  119. }
  120. spin_unlock_irqrestore(&i40iw_handler_lock, flags);
  121. return NULL;
  122. }
/**
 * i40iw_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 *
 * Inserts @hdl at the head of the global handler list under
 * i40iw_handler_lock.
 */
static void i40iw_add_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_add(&hdl->list, &i40iw_handlers);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
}
/**
 * i40iw_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 *
 * Unlinks @hdl from the global handler list under i40iw_handler_lock.
 * Always returns 0.
 */
static int i40iw_del_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return 0;
}
  146. /**
  147. * i40iw_enable_intr - set up device interrupts
  148. * @dev: hardware control device structure
  149. * @msix_id: id of the interrupt to be enabled
  150. */
  151. static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
  152. {
  153. u32 val;
  154. val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
  155. I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
  156. (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
  157. if (dev->is_pf)
  158. i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
  159. else
  160. i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
  161. }
/**
 * i40iw_dpc - tasklet for aeq and ceq 0
 * @data: iwarp device
 *
 * When the AEQ shares its msix vector with CEQ 0 (msix_shared), process
 * CEQ 0 first, then the AEQ, and finally re-arm vector 0.
 */
static void i40iw_dpc(unsigned long data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	if (iwdev->msix_shared)
		i40iw_process_ceq(iwdev, iwdev->ceqlist);
	i40iw_process_aeq(iwdev);
	i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
}
/**
 * i40iw_ceq_dpc - dpc handler for CEQ
 * @data: data points to CEQ
 *
 * Processes completion events for the CEQ, then re-arms its msix vector.
 */
static void i40iw_ceq_dpc(unsigned long data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
	struct i40iw_device *iwdev = iwceq->iwdev;

	i40iw_process_ceq(iwdev, iwceq);
	i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
}
/**
 * i40iw_irq_handler - interrupt handler for aeq and ceq0
 * @irq: Interrupt request number
 * @data: iwarp device
 *
 * Top half only schedules the dpc tasklet; all event processing is
 * deferred to i40iw_dpc().
 */
static irqreturn_t i40iw_irq_handler(int irq, void *data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	tasklet_schedule(&iwdev->dpc_tasklet);
	return IRQ_HANDLED;
}
/**
 * i40iw_destroy_cqp - destroy control qp
 * @iwdev: iwarp device
 * @free_hwcqp: true if the hardware cqp should be destroyed (cqp create
 * succeeded earlier)
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;

	if (free_hwcqp)
		dev->cqp_ops->cqp_destroy(dev->cqp);
	/* flush any cqp requests still waiting for completion */
	i40iw_cleanup_pending_cqp_op(iwdev);
	i40iw_free_dma_mem(dev->hw, &cqp->sq);
	kfree(cqp->scratch_array);
	iwdev->cqp.scratch_array = NULL;
	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}
/**
 * i40iw_disable_irq - disable device interrupts
 * @dev: hardware control device structure
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
			      struct i40iw_msix_vector *msix_vec,
			      void *dev_id)
{
	/* mask the vector in hardware before releasing the irq */
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	/* clear the affinity hint before free_irq, as required */
	irq_set_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}
/**
 * i40iw_destroy_aeq - destroy aeq
 * @iwdev: iwarp device
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;

	/* when msix_shared, vector 0 is owned by CEQ 0 and released in
	 * i40iw_dele_ceqs(); only free it here if the AEQ owns it alone
	 */
	if (!iwdev->msix_shared)
		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
	/* skip the hardware destroy command if the device is being reset */
	if (iwdev->reset)
		goto exit;
	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
		status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
	if (status)
		i40iw_pr_err("destroy aeq failed %d\n", status);
exit:
	i40iw_free_dma_mem(dev->hw, &aeq->mem);
}
/**
 * i40iw_destroy_ceq - destroy ceq
 * @iwdev: iwarp device
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
			      struct i40iw_ceq *iwceq)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	/* skip the hardware destroy command if the device is being reset */
	if (iwdev->reset)
		goto exit;
	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		i40iw_pr_err("ceq destroy command failed %d\n", status);
		goto exit;
	}
	/* wait for the destroy command to complete */
	status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		i40iw_pr_err("ceq destroy completion failed %d\n", status);
exit:
	i40iw_free_dma_mem(dev->hw, &iwceq->mem);
}
/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

	/* CEQ 0 shares vector 0 with the AEQ when msix_shared; its irq was
	 * registered with iwdev as dev_id, so free it with the same cookie
	 */
	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq);
		iwceq++;
		i++;
	}

	/* remaining ceqs each own a dedicated vector registered with the
	 * ceq itself as dev_id; note msix_vec always starts at entry 1
	 * because vector 0 belongs to the AEQ (shared or not)
	 */
	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq);
	}
}
/**
 * i40iw_destroy_ccq - destroy control cq
 * @iwdev: iwarp device
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ccq *ccq = &iwdev->ccq;
	enum i40iw_status_code status = 0;

	/* skip the hardware destroy command if the device is being reset */
	if (!iwdev->reset)
		status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
	if (status)
		i40iw_pr_err("ccq destroy failed %d\n", status);
	i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
}
/* types of hmc objects; iterated by the create/delete helpers below.
 * NOTE(review): assumed to contain exactly IW_HMC_OBJ_TYPE_NUM entries,
 * since the loops index it with that bound — confirm against the header.
 */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};
/**
 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info structure holding the object counts
 * @is_pf: true if the function is PF otherwise false
 * @reset: true if called before reset
 */
static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
					 enum i40iw_hmc_rsrc_type obj_type,
					 struct i40iw_hmc_info *hmc_info,
					 bool is_pf,
					 bool reset)
{
	struct i40iw_hmc_del_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	/* delete every object of this type that was created */
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.is_pf = is_pf;
	if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
		i40iw_pr_err("del obj of type %d failed\n", obj_type);
}
/**
 * i40iw_del_hmc_objects - remove all device hmc objects
 * @dev: hardware control device structure
 * @hmc_info: hmc_info to free
 * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
 * by PF on behalf of VF
 * @reset: true if called before reset
 */
static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
				  struct i40iw_hmc_info *hmc_info,
				  bool is_pf,
				  bool reset)
{
	unsigned int i;

	/* delete each object type listed in iw_hmc_obj_types */
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
		i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
}
/**
 * i40iw_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 *
 * Top half only schedules the ceq dpc tasklet; a mismatched irq number
 * is logged but the tasklet is still scheduled.
 */
static irqreturn_t i40iw_ceq_handler(int irq, void *data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;

	if (iwceq->irq != irq)
		i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);
	return IRQ_HANDLED;
}
/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 *
 * Thin wrapper around the device's create_hmc_object op; returns its
 * status code.
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}
/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: true if the function is PF otherwise false
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
						    bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	/* create each object type in the order listed in iw_hmc_obj_types */
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_pr_err("create obj type %d status = %d\n",
				     iw_hmc_obj_types[i], status);
			break;
		}
	}
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
								      dev->hmc_fn_id,
								      true, true));

	/* unwind: destroy the object types created before the failure,
	 * in reverse creation order
	 */
	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
					     iw_hmc_obj_types[i],
					     dev->hmc_info,
					     is_pf,
					     false);
	}
	return status;
}
/**
 * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
 * @iwdev: iwarp device
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
					     struct i40iw_dma_mem *memptr,
					     u32 size,
					     u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

	/* round the next-free virtual address up to the (mask + 1)
	 * alignment boundary; mask == 0 means no alignment required
	 */
	va = (unsigned long)iwdev->obj_next.va;
	newva = va;
	if (mask)
		newva = ALIGN(va, (mask + 1));
	extra = newva - va;
	/* advance both va and pa by the same padding so they stay paired */
	memptr->va = (u8 *)va + extra;
	memptr->pa = iwdev->obj_next.pa + extra;
	memptr->size = size;
	/* fail if the carve-out would run past the backing allocation */
	if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
		return I40IW_ERR_NO_MEMORY;

	iwdev->obj_next.va = memptr->va + size;
	iwdev->obj_next.pa = memptr->pa + size;
	return 0;
}
  479. /**
  480. * i40iw_create_cqp - create control qp
  481. * @iwdev: iwarp device
  482. *
  483. * Return 0, if the cqp and all the resources associated with it
  484. * are successfully created, otherwise return error
  485. */
  486. static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
  487. {
  488. enum i40iw_status_code status;
  489. u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
  490. struct i40iw_dma_mem mem;
  491. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  492. struct i40iw_cqp_init_info cqp_init_info;
  493. struct i40iw_cqp *cqp = &iwdev->cqp;
  494. u16 maj_err, min_err;
  495. int i;
  496. cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
  497. if (!cqp->cqp_requests)
  498. return I40IW_ERR_NO_MEMORY;
  499. cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
  500. if (!cqp->scratch_array) {
  501. kfree(cqp->cqp_requests);
  502. return I40IW_ERR_NO_MEMORY;
  503. }
  504. dev->cqp = &cqp->sc_cqp;
  505. dev->cqp->dev = dev;
  506. memset(&cqp_init_info, 0, sizeof(cqp_init_info));
  507. status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
  508. (sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
  509. I40IW_CQP_ALIGNMENT);
  510. if (status)
  511. goto exit;
  512. status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
  513. I40IW_HOST_CTX_ALIGNMENT_MASK);
  514. if (status)
  515. goto exit;
  516. dev->cqp->host_ctx_pa = mem.pa;
  517. dev->cqp->host_ctx = mem.va;
  518. /* populate the cqp init info */
  519. cqp_init_info.dev = dev;
  520. cqp_init_info.sq_size = sqsize;
  521. cqp_init_info.sq = cqp->sq.va;
  522. cqp_init_info.sq_pa = cqp->sq.pa;
  523. cqp_init_info.host_ctx_pa = mem.pa;
  524. cqp_init_info.host_ctx = mem.va;
  525. cqp_init_info.hmc_profile = iwdev->resource_profile;
  526. cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
  527. cqp_init_info.scratch_array = cqp->scratch_array;
  528. status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
  529. if (status) {
  530. i40iw_pr_err("cqp init status %d\n", status);
  531. goto exit;
  532. }
  533. status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
  534. if (status) {
  535. i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
  536. status, maj_err, min_err);
  537. goto exit;
  538. }
  539. spin_lock_init(&cqp->req_lock);
  540. INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
  541. INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
  542. /* init the waitq of the cqp_requests and add them to the list */
  543. for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) {
  544. init_waitqueue_head(&cqp->cqp_requests[i].waitq);
  545. list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
  546. }
  547. return 0;
  548. exit:
  549. /* clean up the created resources */
  550. i40iw_destroy_cqp(iwdev, false);
  551. return status;
  552. }
/**
 * i40iw_create_ccq - create control cq
 * @iwdev: iwarp device
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dma_mem mem;
	enum i40iw_status_code status;
	struct i40iw_ccq_init_info info;
	struct i40iw_ccq *ccq = &iwdev->ccq;

	memset(&info, 0, sizeof(info));
	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	info.dev = dev;
	ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
	ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
	/* cq ring itself is a dedicated DMA allocation */
	status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
					ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
	if (status)
		goto exit;
	/* shadow area is carved out of the device object memory pool */
	status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
				       I40IW_SHADOWAREA_MASK);
	if (status)
		goto exit;
	ccq->sc_cq.back_cq = (void *)ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = mem.va;
	info.shadow_area_pa = mem.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	status = dev->ccq_ops->ccq_init(dev->ccq, &info);
	if (!status)
		status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
exit:
	/* on any failure free the cq ring (freeing an unallocated
	 * i40iw_dma_mem is handled by i40iw_free_dma_mem)
	 */
	if (status)
		i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
	return status;
}
  599. /**
  600. * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
  601. * @iwdev: iwarp device
  602. * @msix_vec: interrupt vector information
  603. * @iwceq: ceq associated with the vector
  604. * @ceq_id: the id number of the iwceq
  605. *
  606. * Allocate interrupt resources and enable irq handling
  607. * Return 0 if successful, otherwise return error
  608. */
  609. static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
  610. struct i40iw_ceq *iwceq,
  611. u32 ceq_id,
  612. struct i40iw_msix_vector *msix_vec)
  613. {
  614. enum i40iw_status_code status;
  615. cpumask_t mask;
  616. if (iwdev->msix_shared && !ceq_id) {
  617. tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
  618. status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
  619. } else {
  620. tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
  621. status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
  622. }
  623. cpumask_clear(&mask);
  624. cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
  625. irq_set_affinity_hint(msix_vec->irq, &mask);
  626. if (status) {
  627. i40iw_pr_err("ceq irq config fail\n");
  628. return I40IW_ERR_CONFIG;
  629. }
  630. msix_vec->ceq_id = ceq_id;
  631. return 0;
  632. }
  633. /**
  634. * i40iw_create_ceq - create completion event queue
  635. * @iwdev: iwarp device
  636. * @iwceq: pointer to the ceq resources to be created
  637. * @ceq_id: the id number of the iwceq
  638. *
  639. * Return 0, if the ceq and the resources associated with it
  640. * are successfully created, otherwise return error
  641. */
  642. static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
  643. struct i40iw_ceq *iwceq,
  644. u32 ceq_id)
  645. {
  646. enum i40iw_status_code status;
  647. struct i40iw_ceq_init_info info;
  648. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  649. u64 scratch;
  650. memset(&info, 0, sizeof(info));
  651. info.ceq_id = ceq_id;
  652. iwceq->iwdev = iwdev;
  653. iwceq->mem.size = sizeof(struct i40iw_ceqe) *
  654. iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  655. status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
  656. I40IW_CEQ_ALIGNMENT);
  657. if (status)
  658. goto exit;
  659. info.ceq_id = ceq_id;
  660. info.ceqe_base = iwceq->mem.va;
  661. info.ceqe_pa = iwceq->mem.pa;
  662. info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  663. iwceq->sc_ceq.ceq_id = ceq_id;
  664. info.dev = dev;
  665. scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
  666. status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
  667. if (!status)
  668. status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
  669. exit:
  670. if (status)
  671. i40iw_free_dma_mem(dev->hw, &iwceq->mem);
  672. return status;
  673. }
/**
 * i40iw_request_reset - request a device reset through the lan driver
 * @iwdev: iwarp device
 */
void i40iw_request_reset(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	/* delegate the reset to the i40e lan driver via the client ops;
	 * the final argument (1) selects the reset level - see the i40e
	 * client interface for its exact semantics
	 */
	ldev->ops->request_reset(ldev, iwdev->client, 1);
}
/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	/* hand the queue-vector list to the lan driver first; without it
	 * the msix vectors cannot service the ceqs
	 */
	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	/* cannot create more ceqs than msix vectors or than hmc allows */
	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}

	/* vector 0 serves ceq 0 only when shared with the aeq; otherwise
	 * vector 0 is the aeq's and the ceqs start at vector 1
	 */
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}

		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			/* vector setup failed: undo this ceq, keep earlier ones */
			i40iw_destroy_ceq(iwdev, iwceq);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}

exit:
	if (status) {
		if (!iwdev->ceqs_count) {
			/* nothing came up - report the failure */
			kfree(iwdev->ceqlist);
			iwdev->ceqlist = NULL;
		} else {
			/* at least one ceq works; partial setup counts as success */
			status = 0;
		}
	}
	return status;
}
  742. /**
  743. * i40iw_configure_aeq_vector - set up the msix vector for aeq
  744. * @iwdev: iwarp device
  745. *
  746. * Allocate interrupt resources and enable irq handling
  747. * Return 0 if successful, otherwise return error
  748. */
  749. static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
  750. {
  751. struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
  752. u32 ret = 0;
  753. if (!iwdev->msix_shared) {
  754. tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
  755. ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
  756. }
  757. if (ret) {
  758. i40iw_pr_err("aeq irq config fail\n");
  759. return I40IW_ERR_CONFIG;
  760. }
  761. return 0;
  762. }
  763. /**
  764. * i40iw_create_aeq - create async event queue
  765. * @iwdev: iwarp device
  766. *
  767. * Return 0, if the aeq and the resources associated with it
  768. * are successfully created, otherwise return error
  769. */
  770. static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
  771. {
  772. enum i40iw_status_code status;
  773. struct i40iw_aeq_init_info info;
  774. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  775. struct i40iw_aeq *aeq = &iwdev->aeq;
  776. u64 scratch = 0;
  777. u32 aeq_size;
  778. aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
  779. iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
  780. memset(&info, 0, sizeof(info));
  781. aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
  782. status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
  783. I40IW_AEQ_ALIGNMENT);
  784. if (status)
  785. goto exit;
  786. info.aeqe_base = aeq->mem.va;
  787. info.aeq_elem_pa = aeq->mem.pa;
  788. info.elem_cnt = aeq_size;
  789. info.dev = dev;
  790. status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
  791. if (status)
  792. goto exit;
  793. status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
  794. if (!status)
  795. status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
  796. exit:
  797. if (status)
  798. i40iw_free_dma_mem(dev->hw, &aeq->mem);
  799. return status;
  800. }
/**
 * i40iw_setup_aeq - set up the device aeq
 * @iwdev: iwarp device
 *
 * Create the aeq and configure its msix interrupt vector
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	status = i40iw_create_aeq(iwdev);
	if (status)
		return status;

	status = i40iw_configure_aeq_vector(iwdev);
	if (status) {
		/* tear down the aeq created above on vector setup failure */
		i40iw_destroy_aeq(iwdev);
		return status;
	}

	/* when vector 0 is shared with ceq 0 the interrupt is enabled
	 * during ceq setup instead
	 */
	if (!iwdev->msix_shared)
		i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
	return 0;
}
  824. /**
  825. * i40iw_initialize_ilq - create iwarp local queue for cm
  826. * @iwdev: iwarp device
  827. *
  828. * Return 0 if successful, otherwise return error
  829. */
  830. static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
  831. {
  832. struct i40iw_puda_rsrc_info info;
  833. enum i40iw_status_code status;
  834. memset(&info, 0, sizeof(info));
  835. info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
  836. info.cq_id = 1;
  837. info.qp_id = 0;
  838. info.count = 1;
  839. info.pd_id = 1;
  840. info.sq_size = 8192;
  841. info.rq_size = 8192;
  842. info.buf_size = 1024;
  843. info.tx_buf_cnt = 16384;
  844. info.receive = i40iw_receive_ilq;
  845. info.xmit_complete = i40iw_free_sqbuf;
  846. status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
  847. if (status)
  848. i40iw_pr_err("ilq create fail\n");
  849. return status;
  850. }
  851. /**
  852. * i40iw_initialize_ieq - create iwarp exception queue
  853. * @iwdev: iwarp device
  854. *
  855. * Return 0 if successful, otherwise return error
  856. */
  857. static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
  858. {
  859. struct i40iw_puda_rsrc_info info;
  860. enum i40iw_status_code status;
  861. memset(&info, 0, sizeof(info));
  862. info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
  863. info.cq_id = 2;
  864. info.qp_id = iwdev->sc_dev.exception_lan_queue;
  865. info.count = 1;
  866. info.pd_id = 2;
  867. info.sq_size = 8192;
  868. info.rq_size = 8192;
  869. info.buf_size = 2048;
  870. info.tx_buf_cnt = 16384;
  871. status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
  872. if (status)
  873. i40iw_pr_err("ieq create fail\n");
  874. return status;
  875. }
  876. /**
  877. * i40iw_hmc_setup - create hmc objects for the device
  878. * @iwdev: iwarp device
  879. *
  880. * Set up the device private memory space for the number and size of
  881. * the hmc objects and create the objects
  882. * Return 0 if successful, otherwise return error
  883. */
  884. static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
  885. {
  886. enum i40iw_status_code status;
  887. iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
  888. status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
  889. if (status)
  890. goto exit;
  891. status = i40iw_create_hmc_objs(iwdev, true);
  892. if (status)
  893. goto exit;
  894. iwdev->init_state = HMC_OBJS_CREATED;
  895. exit:
  896. return status;
  897. }
/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 *
 * Frees everything allocated during early init: the dma object
 * memory, the hmc sd table, mem_resources, the ceq list, the msix
 * table and the hmc info backing store.  Each pointer is NULLed
 * after its kfree.
 */
static void i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	/* freed last: dev->hmc_info->hmc_obj points into this buffer
	 * (set up in i40iw_initialize_dev)
	 */
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}
  917. /**
  918. * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
  919. * @iwdev: iwarp device
  920. * @idx: the index of the mac ip address to delete
  921. */
  922. static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
  923. {
  924. struct i40iw_cqp *iwcqp = &iwdev->cqp;
  925. struct i40iw_cqp_request *cqp_request;
  926. struct cqp_commands_info *cqp_info;
  927. enum i40iw_status_code status = 0;
  928. cqp_request = i40iw_get_cqp_request(iwcqp, true);
  929. if (!cqp_request) {
  930. i40iw_pr_err("cqp_request memory failed\n");
  931. return;
  932. }
  933. cqp_info = &cqp_request->info;
  934. cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
  935. cqp_info->post_sq = 1;
  936. cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
  937. cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  938. cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
  939. cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
  940. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  941. if (status)
  942. i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
  943. }
  944. /**
  945. * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
  946. * @iwdev: iwarp device
  947. * @mac_addr: pointer to mac address
  948. * @idx: the index of the mac ip address to add
  949. */
  950. static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
  951. u8 *mac_addr,
  952. u8 idx)
  953. {
  954. struct i40iw_local_mac_ipaddr_entry_info *info;
  955. struct i40iw_cqp *iwcqp = &iwdev->cqp;
  956. struct i40iw_cqp_request *cqp_request;
  957. struct cqp_commands_info *cqp_info;
  958. enum i40iw_status_code status = 0;
  959. cqp_request = i40iw_get_cqp_request(iwcqp, true);
  960. if (!cqp_request) {
  961. i40iw_pr_err("cqp_request memory failed\n");
  962. return I40IW_ERR_NO_MEMORY;
  963. }
  964. cqp_info = &cqp_request->info;
  965. cqp_info->post_sq = 1;
  966. info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
  967. ether_addr_copy(info->mac_addr, mac_addr);
  968. info->entry_idx = idx;
  969. cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  970. cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
  971. cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
  972. cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
  973. status = i40iw_handle_cqp_op(iwdev, cqp_request);
  974. if (status)
  975. i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
  976. return status;
  977. }
/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	/* increment refcount, because we need the cqp request ret value
	 * (compl_info) after the op completes
	 */
	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		/* the allocated table index comes back in op_ret_val */
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}
  1015. /**
  1016. * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
  1017. * @iwdev: iwarp device
  1018. * @macaddr: pointer to mac address
  1019. *
  1020. * Allocate a mac ip address entry and add it to the hw table
  1021. * Return 0 if successful, otherwise return error
  1022. */
  1023. static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
  1024. u8 *macaddr)
  1025. {
  1026. enum i40iw_status_code status;
  1027. status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
  1028. if (!status) {
  1029. status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
  1030. (u8)iwdev->mac_ip_table_idx);
  1031. if (status)
  1032. i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
  1033. }
  1034. return status;
  1035. }
/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 *
 * Walks every UP netdev that is either our own netdev or a vlan
 * stacked on it, and pushes each of its ipv6 addresses into the
 * hardware arp cache.
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		/* match the device itself or any vlan on top of it */
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			/* NOTE(review): addr_list is traversed under
			 * rcu_read_lock() only; confirm whether idev->lock
			 * must also be held for this walk.
			 */
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				/* convert from network to host byte order */
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}
  1071. /**
  1072. * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
  1073. * @iwdev: iwarp device
  1074. */
  1075. static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
  1076. {
  1077. struct net_device *dev;
  1078. struct in_device *idev;
  1079. bool got_lock = true;
  1080. u32 ip_addr;
  1081. if (!rtnl_trylock())
  1082. got_lock = false;
  1083. for_each_netdev(&init_net, dev) {
  1084. if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
  1085. (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
  1086. (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
  1087. idev = in_dev_get(dev);
  1088. for_ifa(idev) {
  1089. i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
  1090. "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
  1091. rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
  1092. ip_addr = ntohl(ifa->ifa_address);
  1093. i40iw_manage_arp_cache(iwdev,
  1094. dev->dev_addr,
  1095. &ip_addr,
  1096. true,
  1097. I40IW_ARP_ADD);
  1098. }
  1099. endfor_ifa(idev);
  1100. in_dev_put(idev);
  1101. }
  1102. }
  1103. if (got_lock)
  1104. rtnl_unlock();
  1105. }
  1106. /**
  1107. * i40iw_add_mac_ip - add mac and ip addresses
  1108. * @iwdev: iwarp device
  1109. *
  1110. * Create and add a mac ip address entry to the hw table and
  1111. * ipv4/ipv6 addresses to the arp cache
  1112. * Return 0 if successful, otherwise return error
  1113. */
  1114. static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
  1115. {
  1116. struct net_device *netdev = iwdev->netdev;
  1117. enum i40iw_status_code status;
  1118. status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
  1119. if (status)
  1120. return status;
  1121. i40iw_add_ipv4_addr(iwdev);
  1122. i40iw_add_ipv6_addr(iwdev);
  1123. return 0;
  1124. }
/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 *
 * Polls the protocol engine load/status registers until all three
 * PE cpus report 0x80 or roughly 14 seconds have elapsed, then
 * performs a final register write.
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		/* 0x80 on all three cpus means the engine is up */
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break;	/* SUCCESS */
		/* busy-waits a full second per retry */
		mdelay(1000);
		retrycount++;
	} while (retrycount < 14);
	/* NOTE(review): undocumented magic register/value - presumably a
	 * required PE configuration poke after firmware load; confirm
	 * against the hardware documentation.
	 */
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}
  1154. /**
  1155. * i40iw_initialize_dev - initialize device
  1156. * @iwdev: iwarp device
  1157. * @ldev: lan device information
  1158. *
  1159. * Allocate memory for the hmc objects and initialize iwdev
  1160. * Return 0 if successful, otherwise clean up the resources
  1161. * and return error
  1162. */
  1163. static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
  1164. struct i40e_info *ldev)
  1165. {
  1166. enum i40iw_status_code status;
  1167. struct i40iw_sc_dev *dev = &iwdev->sc_dev;
  1168. struct i40iw_device_init_info info;
  1169. struct i40iw_vsi_init_info vsi_info;
  1170. struct i40iw_dma_mem mem;
  1171. struct i40iw_l2params l2params;
  1172. u32 size;
  1173. struct i40iw_vsi_stats_info stats_info;
  1174. u16 last_qset = I40IW_NO_QSET;
  1175. u16 qset;
  1176. u32 i;
  1177. memset(&l2params, 0, sizeof(l2params));
  1178. memset(&info, 0, sizeof(info));
  1179. size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
  1180. (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
  1181. iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
  1182. if (!iwdev->hmc_info_mem)
  1183. return I40IW_ERR_NO_MEMORY;
  1184. iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
  1185. dev->hmc_info = &iwdev->hw.hmc;
  1186. dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
  1187. status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
  1188. I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
  1189. if (status)
  1190. goto error;
  1191. info.fpm_query_buf_pa = mem.pa;
  1192. info.fpm_query_buf = mem.va;
  1193. status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
  1194. I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
  1195. if (status)
  1196. goto error;
  1197. info.fpm_commit_buf_pa = mem.pa;
  1198. info.fpm_commit_buf = mem.va;
  1199. info.hmc_fn_id = ldev->fid;
  1200. info.is_pf = (ldev->ftype) ? false : true;
  1201. info.bar0 = ldev->hw_addr;
  1202. info.hw = &iwdev->hw;
  1203. info.debug_mask = debug;
  1204. l2params.mss =
  1205. (ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
  1206. for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
  1207. qset = ldev->params.qos.prio_qos[i].qs_handle;
  1208. l2params.qs_handle_list[i] = qset;
  1209. if (last_qset == I40IW_NO_QSET)
  1210. last_qset = qset;
  1211. else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
  1212. iwdev->dcb = true;
  1213. }
  1214. i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
  1215. info.exception_lan_queue = 1;
  1216. info.vchnl_send = i40iw_virtchnl_send;
  1217. status = i40iw_device_init(&iwdev->sc_dev, &info);
  1218. if (status)
  1219. goto error;
  1220. memset(&vsi_info, 0, sizeof(vsi_info));
  1221. vsi_info.dev = &iwdev->sc_dev;
  1222. vsi_info.back_vsi = (void *)iwdev;
  1223. vsi_info.params = &l2params;
  1224. i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
  1225. if (dev->is_pf) {
  1226. memset(&stats_info, 0, sizeof(stats_info));
  1227. stats_info.fcn_id = ldev->fid;
  1228. stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
  1229. if (!stats_info.pestat) {
  1230. status = I40IW_ERR_NO_MEMORY;
  1231. goto error;
  1232. }
  1233. stats_info.stats_initialize = true;
  1234. if (stats_info.pestat)
  1235. i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
  1236. }
  1237. return status;
  1238. error:
  1239. kfree(iwdev->hmc_info_mem);
  1240. iwdev->hmc_info_mem = NULL;
  1241. return status;
  1242. }
/**
 * i40iw_register_notifiers - register tcp ip notifiers
 *
 * Hooks the inetaddr, inet6addr and netevent notifier chains so the
 * driver is told about address and neighbour changes.
 */
static void i40iw_register_notifiers(void)
{
	register_inetaddr_notifier(&i40iw_inetaddr_notifier);
	register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	register_netevent_notifier(&i40iw_net_notifier);
}
/**
 * i40iw_unregister_notifiers - unregister tcp ip notifiers
 *
 * Counterpart of i40iw_register_notifiers(); removes the driver from
 * the netevent, inetaddr and inet6addr notifier chains.
 */
static void i40iw_unregister_notifiers(void)
{
	unregister_netevent_notifier(&i40iw_net_notifier);
	unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
	unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
}
  1261. /**
  1262. * i40iw_save_msix_info - copy msix vector information to iwarp device
  1263. * @iwdev: iwarp device
  1264. * @ldev: lan device information
  1265. *
  1266. * Allocate iwdev msix table and copy the ldev msix info to the table
  1267. * Return 0 if successful, otherwise return error
  1268. */
  1269. static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
  1270. struct i40e_info *ldev)
  1271. {
  1272. struct i40e_qvlist_info *iw_qvlist;
  1273. struct i40e_qv_info *iw_qvinfo;
  1274. u32 ceq_idx;
  1275. u32 i;
  1276. u32 size;
  1277. if (!ldev->msix_count) {
  1278. i40iw_pr_err("No MSI-X vectors\n");
  1279. return I40IW_ERR_CONFIG;
  1280. }
  1281. iwdev->msix_count = ldev->msix_count;
  1282. size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
  1283. size += sizeof(struct i40e_qvlist_info);
  1284. size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;
  1285. iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
  1286. if (!iwdev->iw_msixtbl)
  1287. return I40IW_ERR_NO_MEMORY;
  1288. iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
  1289. iw_qvlist = iwdev->iw_qvlist;
  1290. iw_qvinfo = iw_qvlist->qv_info;
  1291. iw_qvlist->num_vectors = iwdev->msix_count;
  1292. if (iwdev->msix_count <= num_online_cpus())
  1293. iwdev->msix_shared = true;
  1294. for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
  1295. iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
  1296. iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
  1297. iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
  1298. if (i == 0) {
  1299. iw_qvinfo->aeq_idx = 0;
  1300. if (iwdev->msix_shared)
  1301. iw_qvinfo->ceq_idx = ceq_idx++;
  1302. else
  1303. iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
  1304. } else {
  1305. iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
  1306. iw_qvinfo->ceq_idx = ceq_idx++;
  1307. }
  1308. iw_qvinfo->itr_idx = 3;
  1309. iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
  1310. }
  1311. return 0;
  1312. }
/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 *
 * The switch intentionally falls through from the state reached
 * during init, tearing everything down in reverse creation order.
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);
	if (iwdev->param_wq)
		destroy_workqueue(iwdev->param_wq);

	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);	/* report the port as down */
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		/* fallthrough */
	case IP_ADDR_REGISTERED:
		/* skip the hw table update when the function was reset */
		if (!iwdev->reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		/* fallthrough */
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		/* fallthrough */
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev);
		/* fallthrough */
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev);
		/* fallthrough */
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, true);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (iwdev->vsi.pestat) {
			i40iw_vsi_stats_free(&iwdev->vsi);
			kfree(iwdev->vsi.pestat);
		}
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
		/* fallthrough */
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}
/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	/* keep a private copy of the lan device info */
	memcpy(&hdl->ldev, ldev, sizeof(*ldev));

	/* profile 1 is remapped to 2 - presumably unsupported; confirm */
	if (resource_profile == 1)
		resource_profile = 2;

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
	    (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
	    I40IW_HMC_PROFILE_DEFAULT;
	/* rdma vfs are only allowed on non-default profiles */
	iwdev->max_rdma_vfs =
		(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;

	/* doorbell offset differs between pf (ftype == 0) and vf */
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		return status;

	iwdev->hw.dev_context = (void *)ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;
	iwdev->push_mode = push_mode;

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);
	init_waitqueue_head(&iwdev->close_wq);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	if (status) {
		/* unwind the msix table and the dma object memory */
		kfree(iwdev->iw_msixtbl);
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}
/**
 * i40iw_get_used_rsrc - determine resources used internally
 * @iwdev: iwarp device
 *
 * Called after internal allocations.  The index of the first clear
 * bit in each allocation bitmap is recorded as the count of objects
 * consumed so far - accurate only if init-time allocations were made
 * contiguously from index 0 (NOTE(review): confirm no holes can
 * exist at this point).
 */
static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}
  1448. /**
  1449. * i40iw_open - client interface operation open for iwarp/uda device
  1450. * @ldev: lan device information
  1451. * @client: iwarp client information, provided during registration
  1452. *
  1453. * Called by the lan driver during the processing of client register
  1454. * Create device resources, set up queues, pble and hmc objects and
  1455. * register the device with the ib verbs interface
  1456. * Return 0 if successful, otherwise return error
  1457. */
  1458. static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
  1459. {
  1460. struct i40iw_device *iwdev;
  1461. struct i40iw_sc_dev *dev;
  1462. enum i40iw_status_code status;
  1463. struct i40iw_handler *hdl;
  1464. hdl = i40iw_find_netdev(ldev->netdev);
  1465. if (hdl)
  1466. return 0;
  1467. hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
  1468. if (!hdl)
  1469. return -ENOMEM;
  1470. iwdev = &hdl->device;
  1471. iwdev->hdl = hdl;
  1472. dev = &iwdev->sc_dev;
  1473. i40iw_setup_cm_core(iwdev);
  1474. dev->back_dev = (void *)iwdev;
  1475. iwdev->ldev = &hdl->ldev;
  1476. iwdev->client = client;
  1477. mutex_init(&iwdev->pbl_mutex);
  1478. i40iw_add_handler(hdl);
  1479. do {
  1480. status = i40iw_setup_init_state(hdl, ldev, client);
  1481. if (status)
  1482. break;
  1483. iwdev->init_state = INITIAL_STATE;
  1484. if (dev->is_pf)
  1485. i40iw_wait_pe_ready(dev->hw);
  1486. status = i40iw_create_cqp(iwdev);
  1487. if (status)
  1488. break;
  1489. iwdev->init_state = CQP_CREATED;
  1490. status = i40iw_hmc_setup(iwdev);
  1491. if (status)
  1492. break;
  1493. status = i40iw_create_ccq(iwdev);
  1494. if (status)
  1495. break;
  1496. iwdev->init_state = CCQ_CREATED;
  1497. status = i40iw_initialize_ilq(iwdev);
  1498. if (status)
  1499. break;
  1500. iwdev->init_state = ILQ_CREATED;
  1501. status = i40iw_initialize_ieq(iwdev);
  1502. if (status)
  1503. break;
  1504. iwdev->init_state = IEQ_CREATED;
  1505. status = i40iw_setup_aeq(iwdev);
  1506. if (status)
  1507. break;
  1508. iwdev->init_state = AEQ_CREATED;
  1509. status = i40iw_setup_ceqs(iwdev, ldev);
  1510. if (status)
  1511. break;
  1512. iwdev->init_state = CEQ_CREATED;
  1513. status = i40iw_initialize_hw_resources(iwdev);
  1514. if (status)
  1515. break;
  1516. i40iw_get_used_rsrc(iwdev);
  1517. dev->ccq_ops->ccq_arm(dev->ccq);
  1518. status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
  1519. if (status)
  1520. break;
  1521. iwdev->init_state = PBLE_CHUNK_MEM;
  1522. iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
  1523. status = i40iw_add_mac_ip(iwdev);
  1524. if (status)
  1525. break;
  1526. iwdev->init_state = IP_ADDR_REGISTERED;
  1527. if (i40iw_register_rdma_device(iwdev)) {
  1528. i40iw_pr_err("register rdma device fail\n");
  1529. break;
  1530. };
  1531. iwdev->init_state = RDMA_DEV_REGISTERED;
  1532. iwdev->iw_status = 1;
  1533. i40iw_port_ibevent(iwdev);
  1534. iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
  1535. if(iwdev->param_wq == NULL)
  1536. break;
  1537. i40iw_pr_info("i40iw_open completed\n");
  1538. return 0;
  1539. } while (0);
  1540. i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
  1541. i40iw_deinit_device(iwdev);
  1542. return -ERESTART;
  1543. }
  1544. /**
  1545. * i40iw_l2params_worker - worker for l2 params change
  1546. * @work: work pointer for l2 params
  1547. */
  1548. static void i40iw_l2params_worker(struct work_struct *work)
  1549. {
  1550. struct l2params_work *dwork =
  1551. container_of(work, struct l2params_work, work);
  1552. struct i40iw_device *iwdev = dwork->iwdev;
  1553. i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
  1554. atomic_dec(&iwdev->params_busy);
  1555. kfree(work);
  1556. }
/**
 * i40iw_l2param_change - handle qs handles for qos and mss change
 * @ldev: lan device information
 * @client: client for parameter change
 * @params: new parameters from L2
 *
 * Copies the new qs handles and mss into a work item and defers the
 * actual change to i40iw_l2params_worker on iwdev->param_wq.
 */
static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
				 struct i40e_params *params)
{
	struct i40iw_handler *hdl;
	struct i40iw_l2params *l2params;
	struct l2params_work *work;
	struct i40iw_device *iwdev;
	int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;

	/* Drop this notification if a previous change is still in flight.
	 * NOTE(review): the read-then-increment of params_busy is not atomic
	 * as a unit, so two concurrent callbacks could both pass this check
	 * and each queue a work item - confirm the i40e driver serializes
	 * these callbacks.
	 */
	if (atomic_read(&iwdev->params_busy))
		return;

	/* GFP_ATOMIC: presumably callable from a non-sleeping context */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	atomic_inc(&iwdev->params_busy);

	work->iwdev = iwdev;
	l2params = &work->l2params;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
		l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;

	/* Derive MSS from the new MTU; keep the current MSS when MTU is 0 */
	l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;

	INIT_WORK(&work->work, i40iw_l2params_worker);
	queue_work(iwdev->param_wq, &work->work);
}
  1589. /**
  1590. * i40iw_close - client interface operation close for iwarp/uda device
  1591. * @ldev: lan device information
  1592. * @client: client to close
  1593. *
  1594. * Called by the lan driver during the processing of client unregister
  1595. * Destroy and clean up the driver resources
  1596. */
  1597. static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
  1598. {
  1599. struct i40iw_device *iwdev;
  1600. struct i40iw_handler *hdl;
  1601. hdl = i40iw_find_i40e_handler(ldev);
  1602. if (!hdl)
  1603. return;
  1604. iwdev = &hdl->device;
  1605. iwdev->closing = true;
  1606. if (reset)
  1607. iwdev->reset = true;
  1608. i40iw_cm_disconnect_all(iwdev);
  1609. destroy_workqueue(iwdev->virtchnl_wq);
  1610. i40iw_deinit_device(iwdev);
  1611. }
/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF
 * Destroy and clean up the VF resources
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	iwdev = (struct i40iw_device *)dev->back_dev;

	/* Locate the vf_dev slot belonging to this VF id */
	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
		/* Clear the slot under the pestat lock - presumably so
		 * readers of vf_dev[] (e.g. stats gathering) never see a
		 * partially-torn-down entry; confirm against the readers.
		 */
		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev: allocation covers the vfdev plus its trailing
		 * hmc object info array, so the size must match here
		 */
		vf_dev_mem.va = tmp_vfdev;
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
		    sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}
  1659. /**
  1660. * i40iw_vf_enable - enable a number of VFs
  1661. * @ldev: lan device information
  1662. * @client: client interface instance
  1663. * @num_vfs: number of VFs for the PF
  1664. *
  1665. * Called when the number of VFs changes
  1666. */
  1667. static void i40iw_vf_enable(struct i40e_info *ldev,
  1668. struct i40e_client *client,
  1669. u32 num_vfs)
  1670. {
  1671. struct i40iw_handler *hdl;
  1672. hdl = i40iw_find_i40e_handler(ldev);
  1673. if (!hdl)
  1674. return;
  1675. if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
  1676. hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
  1677. else
  1678. hdl->device.max_enabled_vfs = num_vfs;
  1679. }
  1680. /**
  1681. * i40iw_vf_capable - check if VF capable
  1682. * @ldev: lan device information
  1683. * @client: client interface instance
  1684. * @vf_id: virtual function id
  1685. *
  1686. * Return 1 if a VF slot is available or if VF is already RDMA enabled
  1687. * Return 0 otherwise
  1688. */
  1689. static int i40iw_vf_capable(struct i40e_info *ldev,
  1690. struct i40e_client *client,
  1691. u32 vf_id)
  1692. {
  1693. struct i40iw_handler *hdl;
  1694. struct i40iw_sc_dev *dev;
  1695. unsigned int i;
  1696. hdl = i40iw_find_i40e_handler(ldev);
  1697. if (!hdl)
  1698. return 0;
  1699. dev = &hdl->device.sc_dev;
  1700. for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
  1701. if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
  1702. return 1;
  1703. }
  1704. return 0;
  1705. }
  1706. /**
  1707. * i40iw_virtchnl_receive - receive a message through the virtual channel
  1708. * @ldev: lan device information
  1709. * @client: client interface instance
  1710. * @vf_id: virtual function id associated with the message
  1711. * @msg: message buffer pointer
  1712. * @len: length of the message
  1713. *
  1714. * Invoke virtual channel receive operation for the given msg
  1715. * Return 0 if successful, otherwise return error
  1716. */
  1717. static int i40iw_virtchnl_receive(struct i40e_info *ldev,
  1718. struct i40e_client *client,
  1719. u32 vf_id,
  1720. u8 *msg,
  1721. u16 len)
  1722. {
  1723. struct i40iw_handler *hdl;
  1724. struct i40iw_sc_dev *dev;
  1725. struct i40iw_device *iwdev;
  1726. int ret_code = I40IW_NOT_SUPPORTED;
  1727. if (!len || !msg)
  1728. return I40IW_ERR_PARAM;
  1729. hdl = i40iw_find_i40e_handler(ldev);
  1730. if (!hdl)
  1731. return I40IW_ERR_PARAM;
  1732. dev = &hdl->device.sc_dev;
  1733. iwdev = dev->back_dev;
  1734. if (dev->vchnl_if.vchnl_recv) {
  1735. ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
  1736. if (!dev->is_pf) {
  1737. atomic_dec(&iwdev->vchnl_msgs);
  1738. wake_up(&iwdev->vchnl_waitq);
  1739. }
  1740. }
  1741. return ret_code;
  1742. }
/**
 * i40iw_vf_clear_to_send - wait to send virtual channel message
 * @dev: iwarp device
 *
 * Wait until the virtual channel is clear
 * before sending the next message
 *
 * Returns false if error
 * Returns true if clear to send
 */
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev;
	wait_queue_entry_t wait;

	iwdev = dev->back_dev;

	/* Fast path: nobody else is queued and no message is in flight */
	if (!wq_has_sleeper(&dev->vf_reqs) &&
	    (atomic_read(&iwdev->vchnl_msgs) == 0))
		return true; /* virtual channel is clear */

	/* Queue exclusively so pending senders are woken one at a time */
	init_wait(&wait);
	add_wait_queue_exclusive(&dev->vf_reqs, &wait);

	/* On timeout, mark the channel down so callers stop sending */
	if (!wait_event_timeout(dev->vf_reqs,
				(atomic_read(&iwdev->vchnl_msgs) == 0),
				I40IW_VCHNL_EVENT_TIMEOUT))
		dev->vchnl_up = false;

	remove_wait_queue(&dev->vf_reqs, &wait);
	return dev->vchnl_up;
}
  1769. /**
  1770. * i40iw_virtchnl_send - send a message through the virtual channel
  1771. * @dev: iwarp device
  1772. * @vf_id: virtual function id associated with the message
  1773. * @msg: virtual channel message buffer pointer
  1774. * @len: length of the message
  1775. *
  1776. * Invoke virtual channel send operation for the given msg
  1777. * Return 0 if successful, otherwise return error
  1778. */
  1779. static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
  1780. u32 vf_id,
  1781. u8 *msg,
  1782. u16 len)
  1783. {
  1784. struct i40iw_device *iwdev;
  1785. struct i40e_info *ldev;
  1786. if (!dev || !dev->back_dev)
  1787. return I40IW_ERR_BAD_PTR;
  1788. iwdev = dev->back_dev;
  1789. ldev = iwdev->ldev;
  1790. if (ldev && ldev->ops && ldev->ops->virtchnl_send)
  1791. return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
  1792. return I40IW_ERR_BAD_PTR;
  1793. }
/* Client interface callbacks invoked by the i40e lan driver */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
	.close = i40iw_close,
	.l2_param_change = i40iw_l2param_change,
	.virtchnl_receive = i40iw_virtchnl_receive,
	.vf_reset = i40iw_vf_reset,
	.vf_enable = i40iw_vf_enable,
	.vf_capable = i40iw_vf_capable
};
  1804. /**
  1805. * i40iw_init_module - driver initialization function
  1806. *
  1807. * First function to call when the driver is loaded
  1808. * Register the driver as i40e client and port mapper client
  1809. */
  1810. static int __init i40iw_init_module(void)
  1811. {
  1812. int ret;
  1813. memset(&i40iw_client, 0, sizeof(i40iw_client));
  1814. i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
  1815. i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
  1816. i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
  1817. i40iw_client.ops = &i40e_ops;
  1818. memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
  1819. i40iw_client.type = I40E_CLIENT_IWARP;
  1820. spin_lock_init(&i40iw_handler_lock);
  1821. ret = i40e_register_client(&i40iw_client);
  1822. i40iw_register_notifiers();
  1823. return ret;
  1824. }
/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	/* Tear down in reverse order of i40iw_init_module */
	i40iw_unregister_notifiers();
	i40e_unregister_client(&i40iw_client);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);