/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
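
/* Claim a free MSI-X index from the ULD vector bitmap under the bitmap
 * lock; returns the index, or -ENOSPC when every vector is already in use.
 */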
static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}
	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}
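
/* Return a previously claimed MSI-X index to the ULD vector bitmap. */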
static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD. All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
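
/* Allocate the ULD ingress queues: nrxq offload rx queues followed by
 * nciq concentrator queues, each bound to an MSI-X vector taken from the
 * ULD bitmap when MSI-X is in use. On failure, free whatever was
 * allocated so far.
 */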
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	struct sge *s = &adap->sge;
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	unsigned int bmap_idx = 0;
	unsigned int per_chan;
	int i, err, msi_idx, que_idx = 0;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}
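
/* Set up the ULD ingress queues and, for RDMA on a fully initialized
 * adapter, program the firmware DMAQ parameter so that control queue
 * completions are routed to the RDMA response queues.
 */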
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}
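
/* Free the first n ULD response queues (and their free lists) starting at q. */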
static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}
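
/* Size and allocate the per-ULD rx queue bookkeeping: decide how many
 * offload and concentrator queues to use (rounded to the port count) and
 * initialize the response queue parameters for each of them.
 */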
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}
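
/* Request one MSI-X interrupt per ULD rx queue; on failure, release the
 * vectors and bitmap slots acquired so far.
 */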
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}
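
/* Give each ULD MSI-X vector a readable description of the form
 * "<port0 name>-<uld name><queue index>".
 */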
static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler)
		napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
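
/* Tear down every ULD tx queue: stop its restart tasklet, free the
 * hardware egress queue, any pending tx descriptors and queued skbs.
 */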
static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}
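
/* Drop a reference on the ULD tx queue set and free it when the last
 * user goes away.
 */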
static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}
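
/* Allocate the tx queue set for a ULD type, or take another reference
 * when the CXGB4_TX_OFLD set already exists, since that set is shared
 * by the offload ULDs.
 */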
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;

	i = min_t(int, uld_info->ntxq, num_online_cpus());
	txq_info->ntxq = roundup(i, adap->params.nports);

	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
}
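
/* Allocate the per-adapter ULD state and the rx/tx queue info pointer
 * arrays; undone by t4_uld_mem_free().
 */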
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}
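
/* Shut down every ULD that is currently attached to this adapter. */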
void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}
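
/* Populate the lower-level driver info handed to a ULD at attach time:
 * adapter resources, queue/doorbell registers and capability flags.
 */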
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->ntxq = adap->sge.ofldqsets;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}
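
/* Hand the lower-level driver info to the ULD's add() callback and, if
 * the adapter is already up, report CXGB4_STATE_UP to the ULD.
 */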
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return;
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type. Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
		       const struct cxgb4_uld_info *p)
{
	int ret = 0;
	unsigned int adap_idx = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add) {
			ret = -EBUSY;
			goto free_irq;
		}
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		uld_attach(adap, type);
		adap_idx++;
	}
	mutex_unlock(&uld_mutex);
	return 0;

free_irq:
	if (adap->flags & FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		if (!adap_idx)
			break;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
		adap_idx--;
	}
	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);