cxgb4_uld.c

/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
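
/* Grab a free MSI-X index from the ULD bitmap; returns -ENOSPC when the
 * bitmap is exhausted.
 */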
static int get_msix_idx_from_bmap(struct adapter *adap)
{
        struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
        unsigned long flags;
        unsigned int msix_idx;

        spin_lock_irqsave(&bmap->lock, flags);
        msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
        if (msix_idx < bmap->mapsize) {
                __set_bit(msix_idx, bmap->msix_bmap);
        } else {
                spin_unlock_irqrestore(&bmap->lock, flags);
                return -ENOSPC;
        }
        spin_unlock_irqrestore(&bmap->lock, flags);
        return msix_idx;
}
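
/* Return an MSI-X index to the ULD bitmap. */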
static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
        struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
        unsigned long flags;

        spin_lock_irqsave(&bmap->lock, flags);
        __clear_bit(msix_idx, bmap->msix_bmap);
        spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
        struct adapter *adap = q->adap;

        if (adap->uld[q->uld].lro_flush)
                adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct adapter *adap = q->adap;
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
        int ret;

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (q->flush_handler)
                ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
                                                       rsp, gl, &q->lro_mgr,
                                                       &q->napi);
        else
                ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
                                                   rsp, gl);

        if (ret) {
                rxq->stats.nomem++;
                return -1;
        }

        if (!gl)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}
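
/* Allocate the ULD receive queues (and any concentrator queues), binding
 * each one to a port and, when MSI-X is in use, to a vector index taken
 * from the ULD MSI-X bitmap.
 */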
static int alloc_uld_rxqs(struct adapter *adap,
                          struct sge_uld_rxq_info *rxq_info, bool lro)
{
        struct sge *s = &adap->sge;
        unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
        struct sge_ofld_rxq *q = rxq_info->uldrxq;
        unsigned short *ids = rxq_info->rspq_id;
        unsigned int bmap_idx = 0;
        unsigned int per_chan;
        int i, err, msi_idx, que_idx = 0;

        per_chan = rxq_info->nrxq / adap->params.nports;

        if (adap->flags & USING_MSIX)
                msi_idx = 1;
        else
                msi_idx = -((int)s->intrq.abs_id + 1);

        for (i = 0; i < nq; i++, q++) {
                if (i == rxq_info->nrxq) {
                        /* start allocation of concentrator queues */
                        per_chan = rxq_info->nciq / adap->params.nports;
                        que_idx = 0;
                }

                if (msi_idx >= 0) {
                        bmap_idx = get_msix_idx_from_bmap(adap);
                        msi_idx = adap->msix_info_ulds[bmap_idx].idx;
                }
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
                                       adap->port[que_idx++ / per_chan],
                                       msi_idx,
                                       q->fl.size ? &q->fl : NULL,
                                       uldrx_handler,
                                       lro ? uldrx_flush_handler : NULL,
                                       0);
                if (err)
                        goto freeout;
                if (msi_idx >= 0)
                        rxq_info->msix_tbl[i] = bmap_idx;
                memset(&q->stats, 0, sizeof(q->stats));
                if (ids)
                        ids[i] = q->rspq.abs_id;
        }
        return 0;
freeout:
        q = rxq_info->uldrxq;
        for ( ; i; i--, q++) {
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
        }
        return err;
}
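
/* Set up the SGE receive queues for a ULD and, for RDMA, tell firmware to
 * route control-queue completions to the ULD's response queues.  Note that
 * failures from alloc_uld_rxqs() are collapsed to a plain non-zero return.
 */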
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int i, ret = 0;

        if (adap->flags & USING_MSIX) {
                rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
                                             sizeof(unsigned short),
                                             GFP_KERNEL);
                if (!rxq_info->msix_tbl)
                        return -ENOMEM;
        }

        ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

        /* Tell uP to route control queue completions to rdma rspq */
        if (adap->flags & FULL_INIT_DONE &&
            !ret && uld_type == CXGB4_ULD_RDMA) {
                struct sge *s = &adap->sge;
                unsigned int cmplqid;
                u32 param, cmdop;

                cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
                for_each_port(adap, i) {
                        cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
                        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                                 FW_PARAMS_PARAM_X_V(cmdop) |
                                 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
                        ret = t4_set_params(adap, adap->mbox, adap->pf,
                                            0, 1, &param, &cmplqid);
                }
        }
        return ret;
}
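
/* Free up to n ULD receive queues, skipping any that were never allocated. */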
static void t4_free_uld_rxqs(struct adapter *adap, int n,
                             struct sge_ofld_rxq *q)
{
        for ( ; n; n--, q++) {
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
        }
}
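
/* Tear down a ULD's receive and concentrator queues.  For RDMA, first reset
 * the control-queue completion routing (completion IQ id 0) in firmware.
 */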
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
                struct sge *s = &adap->sge;
                u32 param, cmdop, cmplqid = 0;
                int i;

                cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
                for_each_port(adap, i) {
                        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                                 FW_PARAMS_PARAM_X_V(cmdop) |
                                 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
                        t4_set_params(adap, adap->mbox, adap->pf,
                                      0, 1, &param, &cmplqid);
                }
        }

        if (rxq_info->nciq)
                t4_free_uld_rxqs(adap, rxq_info->nciq,
                                 rxq_info->uldrxq + rxq_info->nrxq);
        t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
        if (adap->flags & USING_MSIX)
                kfree(rxq_info->msix_tbl);
}
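
/* Work out how many receive and concentrator queues a ULD gets (scaled to
 * the number of ports and online CPUs) and allocate the bookkeeping for them.
 */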
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
                          const struct cxgb4_uld_info *uld_info)
{
        struct sge *s = &adap->sge;
        struct sge_uld_rxq_info *rxq_info;
        int i, nrxq, ciq_size;

        rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
        if (!rxq_info)
                return -ENOMEM;

        if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
                i = s->nqs_per_uld;
                rxq_info->nrxq = roundup(i, adap->params.nports);
        } else {
                i = min_t(int, uld_info->nrxq,
                          num_online_cpus());
                rxq_info->nrxq = roundup(i, adap->params.nports);
        }
        if (!uld_info->ciq) {
                rxq_info->nciq = 0;
        } else {
                if (adap->flags & USING_MSIX)
                        rxq_info->nciq = min_t(int, s->nqs_per_uld,
                                               num_online_cpus());
                else
                        rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
                                               num_online_cpus());
                rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
                                  adap->params.nports);
                rxq_info->nciq = max_t(int, rxq_info->nciq,
                                       adap->params.nports);
        }

        nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
        rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
                                   GFP_KERNEL);
        if (!rxq_info->uldrxq) {
                kfree(rxq_info);
                return -ENOMEM;
        }

        rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
        if (!rxq_info->rspq_id) {
                kfree(rxq_info->uldrxq);
                kfree(rxq_info);
                return -ENOMEM;
        }

        for (i = 0; i < rxq_info->nrxq; i++) {
                struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
                r->rspq.uld = uld_type;
                r->fl.size = 72;
        }

        ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
        if (ciq_size > SGE_MAX_IQ_SIZE) {
                dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
                ciq_size = SGE_MAX_IQ_SIZE;
        }

        for (i = rxq_info->nrxq; i < nrxq; i++) {
                struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
                r->rspq.uld = uld_type;
        }

        memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
        adap->sge.uld_rxq_info[uld_type] = rxq_info;
        return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        kfree(rxq_info->rspq_id);
        kfree(rxq_info->uldrxq);
        kfree(rxq_info);
}
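
/* Request an IRQ for each ULD receive queue, unwinding on failure. */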
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int err = 0;
        unsigned int idx, bmap_idx;

        for_each_uldrxq(rxq_info, idx) {
                bmap_idx = rxq_info->msix_tbl[idx];
                err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info_ulds[bmap_idx].desc,
                                  &rxq_info->uldrxq[idx].rspq);
                if (err)
                        goto unwind;
        }
        return 0;
unwind:
        while (idx-- > 0) {
                bmap_idx = rxq_info->msix_tbl[idx];
                free_msix_idx_in_bmap(adap, bmap_idx);
                free_irq(adap->msix_info_ulds[bmap_idx].vec,
                         &rxq_info->uldrxq[idx].rspq);
        }
        return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        unsigned int idx, bmap_idx;

        for_each_uldrxq(rxq_info, idx) {
                bmap_idx = rxq_info->msix_tbl[idx];
                free_msix_idx_in_bmap(adap, bmap_idx);
                free_irq(adap->msix_info_ulds[bmap_idx].vec,
                         &rxq_info->uldrxq[idx].rspq);
        }
}
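
/* Build a human-readable name for each ULD MSI-X vector; the name is the
 * one later handed to request_irq() (and hence shown in /proc/interrupts).
 */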
static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int n = sizeof(adap->msix_info_ulds[0].desc);
        unsigned int idx, bmap_idx;

        for_each_uldrxq(rxq_info, idx) {
                bmap_idx = rxq_info->msix_tbl[idx];
                snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
                         adap->port[0]->name, rxq_info->name, idx);
        }
}

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
        if (!q)
                return;

        if (q->handler)
                napi_enable(&q->napi);

        /* 0-increment GTS to start the timer and enable interrupts */
        t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                     SEINTARM_V(q->intr_params) |
                     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
        if (q && q->handler)
                napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int idx;

        for_each_uldrxq(rxq_info, idx)
                enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int idx;

        for_each_uldrxq(rxq_info, idx)
                quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
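
/* Free a ULD's transmit queues: kill the restart tasklets, release the
 * hardware egress queues and drop any packets still on the send queues.
 */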
static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
        int nq = txq_info->ntxq;
        int i;

        for (i = 0; i < nq; i++) {
                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                if (txq && txq->q.desc) {
                        tasklet_kill(&txq->qresume_tsk);
                        t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
                                        txq->q.cntxt_id);
                        free_tx_desc(adap, &txq->q, txq->q.in_use, false);
                        kfree(txq->q.sdesc);
                        __skb_queue_purge(&txq->sendq);
                        free_txq(adap, &txq->q);
                }
        }
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
                  unsigned int uld_type)
{
        struct sge *s = &adap->sge;
        int nq = txq_info->ntxq;
        int i, j, err;

        j = nq / adap->params.nports;
        for (i = 0; i < nq; i++) {
                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                txq->q.size = 1024;
                err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
                                           s->fw_evtq.cntxt_id, uld_type);
                if (err)
                        goto freeout;
        }
        return 0;
freeout:
        free_sge_txq_uld(adap, txq_info);
        return err;
}
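
/* Drop a reference on a ULD type's transmit queues and free them once the
 * last user goes away.
 */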
static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_txq_info *txq_info = NULL;
        int tx_uld_type = TX_ULD(uld_type);

        txq_info = adap->sge.uld_txq_info[tx_uld_type];
        if (txq_info && atomic_dec_and_test(&txq_info->users)) {
                free_sge_txq_uld(adap, txq_info);
                kfree(txq_info->uldtxq);
                kfree(txq_info);
                adap->sge.uld_txq_info[tx_uld_type] = NULL;
        }
}
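
/* Allocate (or reuse) the transmit queues for a ULD type.  Offload TX queues
 * are shared, so an existing set just gets its reference count bumped.
 */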
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
                  const struct cxgb4_uld_info *uld_info)
{
        struct sge_uld_txq_info *txq_info = NULL;
        int tx_uld_type, i;

        tx_uld_type = TX_ULD(uld_type);
        txq_info = adap->sge.uld_txq_info[tx_uld_type];

        if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
            (atomic_inc_return(&txq_info->users) > 1))
                return 0;

        txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
        if (!txq_info)
                return -ENOMEM;

        i = min_t(int, uld_info->ntxq, num_online_cpus());
        txq_info->ntxq = roundup(i, adap->params.nports);

        txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
                                   GFP_KERNEL);
        if (!txq_info->uldtxq) {
                kfree(txq_info);
                return -ENOMEM;
        }

        if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
                kfree(txq_info->uldtxq);
                kfree(txq_info);
                return -ENOMEM;
        }

        atomic_inc(&txq_info->users);
        adap->sge.uld_txq_info[tx_uld_type] = txq_info;
        return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
                           struct cxgb4_lld_info *lli)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        lli->rxq_ids = rxq_info->rspq_id;
        lli->nrxq = rxq_info->nrxq;
        lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
        lli->nciq = rxq_info->nciq;
}
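
/* Allocate the per-adapter arrays that track registered ULDs and their
 * receive/transmit queue state.
 */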
int t4_uld_mem_alloc(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
        if (!adap->uld)
                return -ENOMEM;

        s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
                                  sizeof(struct sge_uld_rxq_info *),
                                  GFP_KERNEL);
        if (!s->uld_rxq_info)
                goto err_uld;

        s->uld_txq_info = kzalloc(CXGB4_TX_MAX *
                                  sizeof(struct sge_uld_txq_info *),
                                  GFP_KERNEL);
        if (!s->uld_txq_info)
                goto err_uld_rx;
        return 0;

err_uld_rx:
        kfree(s->uld_rxq_info);
err_uld:
        kfree(adap->uld);
        return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        kfree(s->uld_txq_info);
        kfree(s->uld_rxq_info);
        kfree(adap->uld);
}
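
/* Quiesce and release the queues of every ULD that is still attached,
 * typically when the adapter itself is being torn down.
 */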
void t4_uld_clean_up(struct adapter *adap)
{
        unsigned int i;

        if (!adap->uld)
                return;
        for (i = 0; i < CXGB4_ULD_MAX; i++) {
                if (!adap->uld[i].handle)
                        continue;
                if (adap->flags & FULL_INIT_DONE)
                        quiesce_rx_uld(adap, i);
                if (adap->flags & USING_MSIX)
                        free_msix_queue_irqs_uld(adap, i);
                free_sge_queues_uld(adap, i);
                free_queues_uld(adap, i);
        }
}
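
/* Fill in the cxgb4_lld_info block that describes this adapter's resources
 * and capabilities to a ULD.
 */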
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
        int i;

        lld->pdev = adap->pdev;
        lld->pf = adap->pf;
        lld->l2t = adap->l2t;
        lld->tids = &adap->tids;
        lld->ports = adap->port;
        lld->vr = &adap->vres;
        lld->mtus = adap->params.mtus;
        lld->ntxq = adap->sge.ofldqsets;
        lld->nchan = adap->params.nports;
        lld->nports = adap->params.nports;
        lld->wr_cred = adap->params.ofldq_wr_cred;
        lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
        lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
        lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
        lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
        lld->iscsi_ppm = &adap->iscsi_ppm;
        lld->adapter_type = adap->params.chip;
        lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
        lld->udb_density = 1 << adap->params.sge.eq_qpp;
        lld->ucq_density = 1 << adap->params.sge.iq_qpp;
        lld->filt_mode = adap->params.tp.vlan_pri_map;
        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
        for (i = 0; i < NCHAN; i++)
                lld->tx_modq[i] = i;
        lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
        lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
        lld->fw_vers = adap->params.fw_vers;
        lld->dbfifo_int_thresh = dbfifo_int_thresh;
        lld->sge_ingpadboundary = adap->sge.fl_align;
        lld->sge_egrstatuspagesize = adap->sge.stat_len;
        lld->sge_pktshift = adap->sge.pktshift;
        lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
        lld->max_ordird_qp = adap->params.max_ordird_qp;
        lld->max_ird_adapter = adap->params.max_ird_adapter;
        lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
        lld->nodeid = dev_to_node(adap->pdev_dev);
        lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
}
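
/* Hand the lld_info block to a ULD's add() callback and, if the adapter is
 * already up, tell the ULD so via state_change().
 */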
static void uld_attach(struct adapter *adap, unsigned int uld)
{
        void *handle;
        struct cxgb4_lld_info lli;

        uld_init(adap, &lli);
        uld_queue_init(adap, uld, &lli);

        handle = adap->uld[uld].add(&lli);
        if (IS_ERR(handle)) {
                dev_warn(adap->pdev_dev,
                         "could not attach to the %s driver, error %ld\n",
                         adap->uld[uld].name, PTR_ERR(handle));
                return;
        }

        adap->uld[uld].handle = handle;
        t4_register_netevent_notifier();

        if (adap->flags & FULL_INIT_DONE)
                adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.  Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
                       const struct cxgb4_uld_info *p)
{
        int ret = 0;
        unsigned int adap_idx = 0;
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;

        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;
                ret = cfg_queues_uld(adap, type, p);
                if (ret)
                        goto out;
                ret = setup_sge_queues_uld(adap, type, p->lro);
                if (ret)
                        goto free_queues;
                if (adap->flags & USING_MSIX) {
                        name_msix_vecs_uld(adap, type);
                        ret = request_msix_queue_irqs_uld(adap, type);
                        if (ret)
                                goto free_rxq;
                }
                if (adap->flags & FULL_INIT_DONE)
                        enable_rx_uld(adap, type);
                if (adap->uld[type].add) {
                        ret = -EBUSY;
                        goto free_irq;
                }
                ret = setup_sge_txq_uld(adap, type, p);
                if (ret)
                        goto free_irq;
                adap->uld[type] = *p;
                uld_attach(adap, type);
                adap_idx++;
        }
        mutex_unlock(&uld_mutex);
        return 0;

free_irq:
        if (adap->flags & FULL_INIT_DONE)
                quiesce_rx_uld(adap, type);
        if (adap->flags & USING_MSIX)
                free_msix_queue_irqs_uld(adap, type);
free_rxq:
        free_sge_queues_uld(adap, type);
free_queues:
        free_queues_uld(adap, type);
out:
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;
                if (!adap_idx)
                        break;
                adap->uld[type].handle = NULL;
                adap->uld[type].add = NULL;
                release_sge_txq_uld(adap, type);
                if (adap->flags & FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);
                if (adap->flags & USING_MSIX)
                        free_msix_queue_irqs_uld(adap, type);
                free_sge_queues_uld(adap, type);
                free_queues_uld(adap, type);
                adap_idx--;
        }
        mutex_unlock(&uld_mutex);
        return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;

        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;
                adap->uld[type].handle = NULL;
                adap->uld[type].add = NULL;
                release_sge_txq_uld(adap, type);
                if (adap->flags & FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);
                if (adap->flags & USING_MSIX)
                        free_msix_queue_irqs_uld(adap, type);
                free_sge_queues_uld(adap, type);
                free_queues_uld(adap, type);
        }
        mutex_unlock(&uld_mutex);
        return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
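
/*
 * Usage sketch (illustrative only, not part of the driver): a ULD registers
 * by filling in a struct cxgb4_uld_info with the fields consumed above and
 * calling cxgb4_register_uld().  The names my_uld_add, my_uld_rx_handler and
 * my_uld_state_change below are hypothetical placeholders, and the queue
 * counts and sizes are arbitrary.
 *
 *      static struct cxgb4_uld_info my_uld_info = {
 *              .name         = "my_uld",
 *              .nrxq         = 16,
 *              .ntxq         = 16,
 *              .rxq_size     = 1024,
 *              .ciq          = true,
 *              .lro          = false,
 *              .add          = my_uld_add,
 *              .rx_handler   = my_uld_rx_handler,
 *              .state_change = my_uld_state_change,
 *      };
 *
 *      err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *      ...
 *      cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 */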