nes_mgt.c

/*
 * Copyright (c) 2006 - 2011 Intel-NE, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include "nes.h"
#include "nes_mgt.h"

atomic_t pau_qps_created;
atomic_t pau_qps_destroyed;

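/**
 * nes_replenish_mgt_rq - post fresh receive buffers to the mgt RQ
 *
 * Allocates and maps a new skb for each receive WQE the CE handler has
 * consumed, ringing the WQE doorbell in batches of at most 255.  If an
 * skb allocation fails while the ring is empty (or another caller is
 * already replenishing an empty ring), a half-second timer is armed so
 * the refill is retried later.
 */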
static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
{
        unsigned long flags;
        dma_addr_t bus_address;
        struct sk_buff *skb;
        struct nes_hw_nic_rq_wqe *nic_rqe;
        struct nes_hw_mgt *nesmgt;
        struct nes_device *nesdev;
        struct nes_rskb_cb *cb;
        u32 rx_wqes_posted = 0;

        nesmgt = &mgtvnic->mgt;
        nesdev = mgtvnic->nesvnic->nesdev;
        spin_lock_irqsave(&nesmgt->rq_lock, flags);
        if (nesmgt->replenishing_rq != 0) {
                if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
                    (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
                        atomic_set(&mgtvnic->rx_skb_timer_running, 1);
                        spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
                        mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2);    /* 1/2 second */
                        add_timer(&mgtvnic->rq_wqes_timer);
                } else {
                        spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
                }
                return;
        }
        nesmgt->replenishing_rq = 1;
        spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
        do {
                skb = dev_alloc_skb(mgtvnic->nesvnic->max_frame_size);
                if (skb) {
                        skb->dev = mgtvnic->nesvnic->netdev;

                        bus_address = pci_map_single(nesdev->pcidev,
                                                     skb->data, mgtvnic->nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
                        cb = (struct nes_rskb_cb *)&skb->cb[0];
                        cb->busaddr = bus_address;
                        cb->maplen = mgtvnic->nesvnic->max_frame_size;

                        nic_rqe = &nesmgt->rq_vbase[mgtvnic->mgt.rq_head];
                        nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
                                cpu_to_le32(mgtvnic->nesvnic->max_frame_size);
                        nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
                        nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] =
                                cpu_to_le32((u32)bus_address);
                        nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] =
                                cpu_to_le32((u32)((u64)bus_address >> 32));
                        nesmgt->rx_skb[nesmgt->rq_head] = skb;
                        nesmgt->rq_head++;
                        nesmgt->rq_head &= nesmgt->rq_size - 1;
                        atomic_dec(&mgtvnic->rx_skbs_needed);
                        barrier();
                        if (++rx_wqes_posted == 255) {
                                nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
                                rx_wqes_posted = 0;
                        }
                } else {
                        spin_lock_irqsave(&nesmgt->rq_lock, flags);
                        if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
                            (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
                                atomic_set(&mgtvnic->rx_skb_timer_running, 1);
                                spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
                                mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2);    /* 1/2 second */
                                add_timer(&mgtvnic->rq_wqes_timer);
                        } else {
                                spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
                        }
                        break;
                }
        } while (atomic_read(&mgtvnic->rx_skbs_needed));
        barrier();
        if (rx_wqes_posted)
                nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
        nesmgt->replenishing_rq = 0;
}

/**
 * nes_mgt_rq_wqes_timeout - timer handler that retries the RQ refill
 */
static void nes_mgt_rq_wqes_timeout(unsigned long parm)
{
        struct nes_vnic_mgt *mgtvnic = (struct nes_vnic_mgt *)parm;

        atomic_set(&mgtvnic->rx_skb_timer_running, 0);
        if (atomic_read(&mgtvnic->rx_skbs_needed))
                nes_replenish_mgt_rq(mgtvnic);
}

/**
 * nes_mgt_free_skb - unmap and free skb
 */
static void nes_mgt_free_skb(struct nes_device *nesdev, struct sk_buff *skb, u32 dir)
{
        struct nes_rskb_cb *cb;

        cb = (struct nes_rskb_cb *)&skb->cb[0];
        pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, dir);
        cb->busaddr = 0;
        dev_kfree_skb_any(skb);
}

/**
 * nes_download_callback - handle download completions
 */
static void nes_download_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
{
        struct pau_fpdu_info *fpdu_info = cqp_request->cqp_callback_pointer;
        struct nes_qp *nesqp = fpdu_info->nesqp;
        struct sk_buff *skb;
        int i;

        for (i = 0; i < fpdu_info->frag_cnt; i++) {
                skb = fpdu_info->frags[i].skb;
                if (fpdu_info->frags[i].cmplt) {
                        nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
                        nes_rem_ref_cm_node(nesqp->cm_node);
                }
        }

        if (fpdu_info->hdr_vbase)
                pci_free_consistent(nesdev->pcidev, fpdu_info->hdr_len,
                                    fpdu_info->hdr_vbase, fpdu_info->hdr_pbase);
        kfree(fpdu_info);
}

/**
 * nes_get_seq - Get the seq, ack_seq and window from the packet
 */
static u32 nes_get_seq(struct sk_buff *skb, u32 *ack, u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
{
        struct nes_rskb_cb *cb = (struct nes_rskb_cb *)&skb->cb[0];
        struct iphdr *iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
        struct tcphdr *tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));

        *ack = be32_to_cpu(tcph->ack_seq);
        *wnd = be16_to_cpu(tcph->window);
        *fin_rcvd = tcph->fin;
        *rst_rcvd = tcph->rst;
        return be32_to_cpu(tcph->seq);
}

/**
 * nes_get_next_skb - Get the next skb based on where current skb is in the queue
 */
static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp *nesqp,
                                        struct sk_buff *skb, u32 nextseq, u32 *ack,
                                        u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
{
        u32 seq;
        bool processacks;
        struct sk_buff *old_skb;

        if (skb) {
                /* Continue processing fpdu */
                if (skb->next == (struct sk_buff *)&nesqp->pau_list)
                        goto out;
                skb = skb->next;
                processacks = false;
        } else {
                /* Starting a new one */
                if (skb_queue_empty(&nesqp->pau_list))
                        goto out;
                skb = skb_peek(&nesqp->pau_list);
                processacks = true;
        }

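        /*
         * Walk the sequence-ordered pau_list: free stale segments that
         * start before nextseq, and (except for the first segment of a
         * new fpdu) zero-length in-sequence segments such as pure ACKs.
         * A segment starting past nextseq means there is a hole, so give
         * up and wait for the retransmit.
         */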
        while (1) {
                if (skb_queue_empty(&nesqp->pau_list))
                        goto out;

                seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd);
                if (seq == nextseq) {
                        if (skb->len || processacks)
                                break;
                } else if (after(seq, nextseq)) {
                        goto out;
                }

                old_skb = skb;
                skb = skb->next;
                skb_unlink(old_skb, &nesqp->pau_list);
                nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
                nes_rem_ref_cm_node(nesqp->cm_node);
                if (skb == (struct sk_buff *)&nesqp->pau_list)
                        goto out;
        }

        return skb;

out:
        return NULL;
}

/**
 * get_fpdu_info - Find the next complete fpdu and return its fragments.
 */
static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
                         struct pau_fpdu_info **pau_fpdu_info)
{
        struct sk_buff *skb;
        struct iphdr *iph;
        struct tcphdr *tcph;
        struct nes_rskb_cb *cb;
        struct pau_fpdu_info *fpdu_info = NULL;
        struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
        u32 fpdu_len = 0;
        u32 tmp_len;
        int frag_cnt = 0;
        u32 tot_len;
        u32 frag_tot;
        u32 ack;
        u32 fin_rcvd;
        u32 rst_rcvd;
        u16 wnd;
        int i;
        int rc = 0;

        *pau_fpdu_info = NULL;

        skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd);
        if (!skb)
                goto out;

        cb = (struct nes_rskb_cb *)&skb->cb[0];

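        /*
         * The first two payload bytes are the MPA ULPDU length field.
         * Add MPA_FRAMING (presumably the length marker plus trailing
         * CRC, per nes_mgt.h) and round up to the 4-byte MPA boundary
         * to get the full fpdu length on the wire.
         */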
        if (skb->len) {
                fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING;
                fpdu_len = (fpdu_len + 3) & 0xfffffffc;
                tmp_len = fpdu_len;

                /* See if we have all of the fpdu */
                frag_tot = 0;
                memset(&frags, 0, sizeof frags);
                for (i = 0; i < MAX_FPDU_FRAGS; i++) {
                        frags[i].physaddr = cb->busaddr;
                        frags[i].physaddr += skb->data - cb->data_start;
                        frags[i].frag_len = min(tmp_len, skb->len);
                        frags[i].skb = skb;
                        frags[i].cmplt = (skb->len == frags[i].frag_len);
                        frag_tot += frags[i].frag_len;
                        frag_cnt++;

                        tmp_len -= frags[i].frag_len;
                        if (tmp_len == 0)
                                break;

                        skb = nes_get_next_skb(nesdev, nesqp, skb,
                                               nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd);
                        if (!skb)
                                goto out;
                        if (rst_rcvd) {
                                /* rst received in the middle of fpdu */
                                for (; i >= 0; i--) {
                                        skb_unlink(frags[i].skb, &nesqp->pau_list);
                                        nes_mgt_free_skb(nesdev, frags[i].skb, PCI_DMA_TODEVICE);
                                }
                                cb = (struct nes_rskb_cb *)&skb->cb[0];
                                frags[0].physaddr = cb->busaddr;
                                frags[0].physaddr += skb->data - cb->data_start;
                                frags[0].frag_len = skb->len;
                                frags[0].skb = skb;
                                frags[0].cmplt = true;
                                frag_cnt = 1;
                                break;
                        }

                        cb = (struct nes_rskb_cb *)&skb->cb[0];
                }
        } else {
                /* no data */
                frags[0].physaddr = cb->busaddr;
                frags[0].frag_len = 0;
                frags[0].skb = skb;
                frags[0].cmplt = true;
                frag_cnt = 1;
        }

        /* Found one */
        fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
        if (!fpdu_info) {
                rc = -ENOMEM;
                goto out;
        }

        fpdu_info->cqp_request = nes_get_cqp_request(nesdev);
        if (fpdu_info->cqp_request == NULL) {
                nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
                rc = -ENOMEM;
                goto out;
        }

        cb = (struct nes_rskb_cb *)&frags[0].skb->cb[0];
        iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
        tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
        fpdu_info->hdr_len = (((unsigned char *)tcph) + 4 * (tcph->doff)) - cb->data_start;
        fpdu_info->data_len = fpdu_len;
        tot_len = fpdu_info->hdr_len + fpdu_len - ETH_HLEN;

        if (frags[0].cmplt) {
                fpdu_info->hdr_pbase = cb->busaddr;
                fpdu_info->hdr_vbase = NULL;
        } else {
                fpdu_info->hdr_vbase = pci_alloc_consistent(nesdev->pcidev,
                                                            fpdu_info->hdr_len, &fpdu_info->hdr_pbase);
                if (!fpdu_info->hdr_vbase) {
                        nes_debug(NES_DBG_PAU, "Unable to allocate memory for pau first frag\n");
                        rc = -ENOMEM;
                        goto out;
                }

                /* Copy hdrs, adjusting len and seqnum */
                memcpy(fpdu_info->hdr_vbase, cb->data_start, fpdu_info->hdr_len);
                iph = (struct iphdr *)(fpdu_info->hdr_vbase + ETH_HLEN);
                tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
        }

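        /*
         * Rewrite the headers so the rebuilt, aligned frame describes
         * the whole fpdu: the source address becomes 127.0.0.1, matching
         * the loopback quad hash installed by nes_change_quad_hash(),
         * and seq/ack/window are refreshed from the queued segments.
         */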
        iph->tot_len = cpu_to_be16(tot_len);
        iph->saddr = cpu_to_be32(0x7f000001);

        tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
        tcph->ack_seq = cpu_to_be32(ack);
        tcph->window = cpu_to_be16(wnd);

        nesqp->pau_rcv_nxt += fpdu_len + fin_rcvd;

        memcpy(fpdu_info->frags, frags, sizeof(fpdu_info->frags));
        fpdu_info->frag_cnt = frag_cnt;
        fpdu_info->nesqp = nesqp;
        *pau_fpdu_info = fpdu_info;

        /* Update skb's for next pass */
        for (i = 0; i < frag_cnt; i++) {
                cb = (struct nes_rskb_cb *)&frags[i].skb->cb[0];
                skb_pull(frags[i].skb, frags[i].frag_len);

                if (frags[i].skb->len == 0) {
                        /* Pull skb off the list - it will be freed in the callback */
                        if (!skb_queue_empty(&nesqp->pau_list))
                                skb_unlink(frags[i].skb, &nesqp->pau_list);
                } else {
                        /* Last skb still has data so update the seq */
                        iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
                        tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
                        tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
                }
        }

out:
        if (rc) {
                if (fpdu_info) {
                        if (fpdu_info->cqp_request)
                                nes_put_cqp_request(nesdev, fpdu_info->cqp_request);
                        kfree(fpdu_info);
                }
        }

        return rc;
}

/**
 * forward_fpdus - send complete fpdus, one at a time
 */
static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
        struct nes_device *nesdev = nesvnic->nesdev;
        struct pau_fpdu_info *fpdu_info;
        struct nes_hw_cqp_wqe *cqp_wqe;
        struct nes_cqp_request *cqp_request;
        unsigned long flags;
        u64 u64tmp;
        u32 u32tmp;
        int rc;

        while (1) {
                spin_lock_irqsave(&nesqp->pau_lock, flags);
                rc = get_fpdu_info(nesdev, nesqp, &fpdu_info);
                if (rc || (fpdu_info == NULL)) {
                        spin_unlock_irqrestore(&nesqp->pau_lock, flags);
                        return rc;
                }

                cqp_request = fpdu_info->cqp_request;
                cqp_wqe = &cqp_request->cqp_wqe;
                nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_OPCODE_IDX,
                                    NES_CQP_DOWNLOAD_SEGMENT |
                                    (((u32)nesvnic->logical_port) << NES_CQP_OP_LOGICAL_PORT_SHIFT));

                u32tmp = fpdu_info->hdr_len << 16;
                u32tmp |= fpdu_info->hdr_len + (u32)fpdu_info->data_len;
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX,
                                    u32tmp);

                u32tmp = (fpdu_info->frags[1].frag_len << 16) | fpdu_info->frags[0].frag_len;
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_2_1_IDX,
                                    u32tmp);

                u32tmp = (fpdu_info->frags[3].frag_len << 16) | fpdu_info->frags[2].frag_len;
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_4_3_IDX,
                                    u32tmp);

                u64tmp = (u64)fpdu_info->hdr_pbase;
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
                                    lower_32_bits(u64tmp));
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
                                    upper_32_bits(u64tmp));

                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
                                    lower_32_bits(fpdu_info->frags[0].physaddr));
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_HIGH_IDX,
                                    upper_32_bits(fpdu_info->frags[0].physaddr));

                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_LOW_IDX,
                                    lower_32_bits(fpdu_info->frags[1].physaddr));
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_HIGH_IDX,
                                    upper_32_bits(fpdu_info->frags[1].physaddr));

                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_LOW_IDX,
                                    lower_32_bits(fpdu_info->frags[2].physaddr));
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_HIGH_IDX,
                                    upper_32_bits(fpdu_info->frags[2].physaddr));

                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_LOW_IDX,
                                    lower_32_bits(fpdu_info->frags[3].physaddr));
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_HIGH_IDX,
                                    upper_32_bits(fpdu_info->frags[3].physaddr));

                cqp_request->cqp_callback_pointer = fpdu_info;
                cqp_request->callback = 1;
                cqp_request->cqp_callback = nes_download_callback;

                atomic_set(&cqp_request->refcount, 1);
                nes_post_cqp_request(nesdev, cqp_request);
                spin_unlock_irqrestore(&nesqp->pau_lock, flags);
        }

        return 0;
}

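/**
 * process_fpdus - drain the pau queue for a QP
 *
 * Keeps forwarding complete fpdus until no new work arrived while we
 * were busy: queue_fpdus() sets pau_pending when it finds pau_busy set,
 * and pau_busy is only cleared once a pass ends with nothing pending.
 */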
static void process_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
        int again = 1;
        unsigned long flags;

        do {
                /* Ignore rc - if it failed, tcp retries will cause it to try again */
                forward_fpdus(nesvnic, nesqp);

                spin_lock_irqsave(&nesqp->pau_lock, flags);
                if (nesqp->pau_pending) {
                        nesqp->pau_pending = 0;
                } else {
                        nesqp->pau_busy = 0;
                        again = 0;
                }

                spin_unlock_irqrestore(&nesqp->pau_lock, flags);
        } while (again);
}

/**
 * queue_fpdus - Handle fpdus that hw passed up to sw
 */
static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
        struct sk_buff *tmpskb;
        struct nes_rskb_cb *cb;
        struct iphdr *iph;
        struct tcphdr *tcph;
        unsigned char *tcph_end;
        u32 rcv_nxt;
        u32 rcv_wnd;
        u32 seqnum;
        u32 len;
        bool process_it = false;
        unsigned long flags;

        /* Move data ptr to after tcp header */
        iph = (struct iphdr *)skb->data;
        tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
        seqnum = be32_to_cpu(tcph->seq);
        tcph_end = (((char *)tcph) + (4 * tcph->doff));

        len = be16_to_cpu(iph->tot_len);
        if (skb->len > len)
                skb_trim(skb, len);
        skb_pull(skb, tcph_end - skb->data);

        /* Initialize tracking values */
        cb = (struct nes_rskb_cb *)&skb->cb[0];
        cb->seqnum = seqnum;

        /* Make sure data is in the receive window */
        rcv_nxt = nesqp->pau_rcv_nxt;
        rcv_wnd = le32_to_cpu(nesqp->nesqp_context->rcv_wnd);
        if (!between(seqnum, rcv_nxt, (rcv_nxt + rcv_wnd))) {
                nes_mgt_free_skb(nesvnic->nesdev, skb, PCI_DMA_TODEVICE);
                nes_rem_ref_cm_node(nesqp->cm_node);
                return;
        }

        spin_lock_irqsave(&nesqp->pau_lock, flags);

        if (nesqp->pau_busy)
                nesqp->pau_pending = 1;
        else
                nesqp->pau_busy = 1;

        /* Queue skb by sequence number */
        if (skb_queue_len(&nesqp->pau_list) == 0) {
                skb_queue_head(&nesqp->pau_list, skb);
        } else {
                tmpskb = nesqp->pau_list.next;
                while (tmpskb != (struct sk_buff *)&nesqp->pau_list) {
                        cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
                        if (before(seqnum, cb->seqnum))
                                break;
                        tmpskb = tmpskb->next;
                }
                skb_insert(tmpskb, skb, &nesqp->pau_list);
        }

        if (nesqp->pau_state == PAU_READY)
                process_it = true;

        spin_unlock_irqrestore(&nesqp->pau_lock, flags);

        if (process_it)
                process_fpdus(nesvnic, nesqp);

        return;
}

/**
 * mgt_thread - Handle mgt skbs in a safe context
 *
 * Each dequeued skb is remapped PCI_DMA_TODEVICE because its payload
 * will be handed back to the adapter by the download WQE.
 */
static int mgt_thread(void *context)
{
        struct nes_vnic *nesvnic = context;
        struct sk_buff *skb;
        struct nes_rskb_cb *cb;

        while (!kthread_should_stop()) {
                wait_event_interruptible(nesvnic->mgt_wait_queue,
                                         skb_queue_len(&nesvnic->mgt_skb_list) || kthread_should_stop());
                while ((skb_queue_len(&nesvnic->mgt_skb_list)) && !kthread_should_stop()) {
                        skb = skb_dequeue(&nesvnic->mgt_skb_list);
                        cb = (struct nes_rskb_cb *)&skb->cb[0];
                        cb->data_start = skb->data - ETH_HLEN;
                        cb->busaddr = pci_map_single(nesvnic->nesdev->pcidev, cb->data_start,
                                                     nesvnic->max_frame_size, PCI_DMA_TODEVICE);
                        queue_fpdus(skb, nesvnic, cb->nesqp);
                }
        }

        /* Closing down so delete any entries on the queue */
        while (skb_queue_len(&nesvnic->mgt_skb_list)) {
                skb = skb_dequeue(&nesvnic->mgt_skb_list);
                cb = (struct nes_rskb_cb *)&skb->cb[0];
                nes_rem_ref_cm_node(cb->nesqp->cm_node);
                dev_kfree_skb_any(skb);
        }
        return 0;
}

/**
 * nes_queue_mgt_skbs - Queue skb so it can be handled in a thread context
 */
void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
        struct nes_rskb_cb *cb;

        cb = (struct nes_rskb_cb *)&skb->cb[0];
        cb->nesqp = nesqp;
        skb_queue_tail(&nesvnic->mgt_skb_list, skb);
        wake_up_interruptible(&nesvnic->mgt_wait_queue);
}

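/**
 * nes_destroy_pau_qp - free any skbs still queued on a pau QP
 */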
void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
{
        struct sk_buff *skb;
        unsigned long flags;

        atomic_inc(&pau_qps_destroyed);

        /* Free packets that have not yet been forwarded */
        /* Lock is acquired by skb_dequeue when removing the skb */
        spin_lock_irqsave(&nesqp->pau_lock, flags);
        while (skb_queue_len(&nesqp->pau_list)) {
                skb = skb_dequeue(&nesqp->pau_list);
                nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
                nes_rem_ref_cm_node(nesqp->cm_node);
        }
        spin_unlock_irqrestore(&nesqp->pau_lock, flags);
}

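/**
 * nes_chg_qh_handler - CQP completion handler for the quad-hash change
 *
 * Moving a pau QP onto the loopback address is a two-step state machine:
 * PAU_DEL_QH (old hash entry deleted, so post the new loopback entry
 * here) followed by PAU_ADD_LB_QH (loopback hash installed, so mark the
 * QP PAU_READY and drain any fpdus that queued up in the meantime).
 */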
static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
{
        struct pau_qh_chg *qh_chg = cqp_request->cqp_callback_pointer;
        struct nes_cqp_request *new_request;
        struct nes_hw_cqp_wqe *cqp_wqe;
        struct nes_adapter *nesadapter;
        struct nes_qp *nesqp;
        struct nes_v4_quad nes_quad;
        u32 crc_value;
        u64 u64temp;

        nesadapter = nesdev->nesadapter;
        nesqp = qh_chg->nesqp;

        /* Should we handle the bad completion */
        if (cqp_request->major_code)
                WARN(1, PFX "Invalid cqp_request major_code=0x%x\n",
                     cqp_request->major_code);

        switch (nesqp->pau_state) {
        case PAU_DEL_QH:
                /* Old hash code deleted, now set the new one */
                nesqp->pau_state = PAU_ADD_LB_QH;
                new_request = nes_get_cqp_request(nesdev);
                if (new_request == NULL) {
                        nes_debug(NES_DBG_PAU, "Failed to get a new_request.\n");
                        WARN_ON(1);
                        return;
                }

                memset(&nes_quad, 0, sizeof(nes_quad));
                nes_quad.DstIpAdrIndex =
                        cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
                nes_quad.SrcIpadr = cpu_to_be32(0x7f000001);
                nes_quad.TcpPorts[0] = swab16(nesqp->nesqp_context->tcpPorts[1]);
                nes_quad.TcpPorts[1] = swab16(nesqp->nesqp_context->tcpPorts[0]);

                /* Produce hash key */
                crc_value = get_crc_value(&nes_quad);
                nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
                nes_debug(NES_DBG_PAU, "new HTE Index = 0x%08X, CRC = 0x%08X\n",
                          nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);

                nesqp->hte_index &= nesadapter->hte_index_mask;
                nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
                nesqp->nesqp_context->ip0 = cpu_to_le32(0x7f000001);
                nesqp->nesqp_context->rcv_nxt = cpu_to_le32(nesqp->pau_rcv_nxt);

                cqp_wqe = &new_request->cqp_wqe;
                nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
                set_wqe_32bit_value(cqp_wqe->wqe_words,
                                    NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH |
                                    NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
                u64temp = (u64)nesqp->nesqp_context_pbase;
                set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

                nes_debug(NES_DBG_PAU, "Waiting for CQP completion for adding the quad hash.\n");

                new_request->cqp_callback_pointer = qh_chg;
                new_request->callback = 1;
                new_request->cqp_callback = nes_chg_qh_handler;
                atomic_set(&new_request->refcount, 1);
                nes_post_cqp_request(nesdev, new_request);
                break;

        case PAU_ADD_LB_QH:
                /* Start processing the queued fpdus */
                nesqp->pau_state = PAU_READY;
                process_fpdus(qh_chg->nesvnic, qh_chg->nesqp);
                kfree(qh_chg);
                break;
        }
}

/**
 * nes_change_quad_hash - kick off replacing a QP's quad hash with the loopback entry
 */
static int nes_change_quad_hash(struct nes_device *nesdev,
                                struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
        struct nes_cqp_request *cqp_request = NULL;
        struct pau_qh_chg *qh_chg = NULL;
        u64 u64temp;
        struct nes_hw_cqp_wqe *cqp_wqe;
        int ret = 0;

        cqp_request = nes_get_cqp_request(nesdev);
        if (cqp_request == NULL) {
                nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
                ret = -ENOMEM;
                goto chg_qh_err;
        }

        qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC);
        if (!qh_chg) {
                ret = -ENOMEM;
                goto chg_qh_err;
        }
        qh_chg->nesdev = nesdev;
        qh_chg->nesvnic = nesvnic;
        qh_chg->nesqp = nesqp;
        nesqp->pau_state = PAU_DEL_QH;

        cqp_wqe = &cqp_request->cqp_wqe;
        nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
        set_wqe_32bit_value(cqp_wqe->wqe_words,
                            NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH | NES_CQP_QP_DEL_HTE |
                            NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
        set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
        u64temp = (u64)nesqp->nesqp_context_pbase;
        set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

        nes_debug(NES_DBG_PAU, "Waiting for CQP completion for deleting the quad hash.\n");

        cqp_request->cqp_callback_pointer = qh_chg;
        cqp_request->callback = 1;
        cqp_request->cqp_callback = nes_chg_qh_handler;
        atomic_set(&cqp_request->refcount, 1);
        nes_post_cqp_request(nesdev, cqp_request);
        return ret;

chg_qh_err:
        kfree(qh_chg);
        if (cqp_request)
                nes_put_cqp_request(nesdev, cqp_request);
        return ret;
}

/**
 * nes_mgt_ce_handler - handle completions on the mgt CQ
 * This management code deals with any packed and unaligned (pau) fpdus
 * that the hardware cannot handle.
 */
static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
{
        struct nes_vnic_mgt *mgtvnic = container_of(cq, struct nes_vnic_mgt, mgt_cq);
        struct nes_adapter *nesadapter = nesdev->nesadapter;
        u32 head;
        u32 cq_size;
        u32 cqe_count = 0;
        u32 cqe_misc;
        u32 qp_id = 0;
        u32 skbs_needed;
        unsigned long context;
        struct nes_qp *nesqp;
        struct sk_buff *rx_skb;
        struct nes_rskb_cb *cb;

        head = cq->cq_head;
        cq_size = cq->cq_size;

        while (1) {
                cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]);
                if (!(cqe_misc & NES_NIC_CQE_VALID))
                        break;

                nesqp = NULL;
                if (cqe_misc & NES_NIC_CQE_ACCQP_VALID) {
                        qp_id = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_ACCQP_ID_IDX]);
                        qp_id &= 0x001fffff;
                        if (qp_id < nesadapter->max_qp) {
                                context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN];
                                nesqp = (struct nes_qp *)context;
                        }
                }

                if (nesqp) {
                        if (nesqp->pau_mode == false) {
                                nesqp->pau_mode = true; /* First time for this qp */
                                nesqp->pau_rcv_nxt = le32_to_cpu(
                                        cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
                                skb_queue_head_init(&nesqp->pau_list);
                                spin_lock_init(&nesqp->pau_lock);
                                atomic_inc(&pau_qps_created);
                                nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
                        }

                        rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
                        rx_skb->len = 0;
                        skb_put(rx_skb, cqe_misc & 0x0000ffff);
                        rx_skb->protocol = eth_type_trans(rx_skb, mgtvnic->nesvnic->netdev);
                        cb = (struct nes_rskb_cb *)&rx_skb->cb[0];
                        pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, PCI_DMA_FROMDEVICE);
                        cb->busaddr = 0;
                        mgtvnic->mgt.rq_tail++;
                        mgtvnic->mgt.rq_tail &= mgtvnic->mgt.rq_size - 1;

                        nes_add_ref_cm_node(nesqp->cm_node);
                        nes_queue_mgt_skbs(rx_skb, mgtvnic->nesvnic, nesqp);
                } else {
                        printk(KERN_ERR PFX "Invalid QP %d for packed/unaligned handling\n", qp_id);
                }

                cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
                cqe_count++;
                if (++head >= cq_size)
                        head = 0;

                if (cqe_count == 255) {
                        /* Replenish mgt CQ */
                        nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (cqe_count << 16));
                        nesdev->currcq_count += cqe_count;
                        cqe_count = 0;
                }

                skbs_needed = atomic_inc_return(&mgtvnic->rx_skbs_needed);
                if (skbs_needed > (mgtvnic->mgt.rq_size >> 1))
                        nes_replenish_mgt_rq(mgtvnic);
        }

        cq->cq_head = head;
        nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
                    cq->cq_number | (cqe_count << 16));
        nes_read32(nesdev->regs + NES_CQE_ALLOC);
        nesdev->currcq_count += cqe_count;
}

/**
 * nes_init_mgt_qp - create and initialize the mgt QPs and CQs
 */
int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic)
{
        struct nes_vnic_mgt *mgtvnic;
        u32 counter;
        void *vmem;
        dma_addr_t pmem;
        struct nes_hw_cqp_wqe *cqp_wqe;
        u32 cqp_head;
        unsigned long flags;
        struct nes_hw_nic_qp_context *mgt_context;
        u64 u64temp;
        struct nes_hw_nic_rq_wqe *mgt_rqe;
        struct sk_buff *skb;
        u32 wqe_count;
        struct nes_rskb_cb *cb;
        u32 mgt_mem_size;
        void *mgt_vbase;
        dma_addr_t mgt_pbase;
        int i;
        int ret;

        /* Allocate space for all mgt QPs at once */
        mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL);
        if (!mgtvnic)
                return -ENOMEM;

        /* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */
        /* We are not sending from this NIC so sq is not allocated */
        mgt_mem_size = 256 +
                       (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)) +
                       (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_cqe)) +
                       sizeof(struct nes_hw_nic_qp_context);
        mgt_mem_size = (mgt_mem_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

        mgt_vbase = pci_alloc_consistent(nesdev->pcidev, NES_MGT_QP_COUNT * mgt_mem_size, &mgt_pbase);
        if (!mgt_vbase) {
                kfree(mgtvnic);
                nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt host descriptor rings\n");
                return -ENOMEM;
        }

        nesvnic->mgt_mem_size = NES_MGT_QP_COUNT * mgt_mem_size;
        nesvnic->mgt_vbase = mgt_vbase;
        nesvnic->mgt_pbase = mgt_pbase;

        skb_queue_head_init(&nesvnic->mgt_skb_list);
        init_waitqueue_head(&nesvnic->mgt_wait_queue);
        nesvnic->mgt_thread = kthread_run(mgt_thread, nesvnic, "nes_mgt_thread");

        for (i = 0; i < NES_MGT_QP_COUNT; i++) {
                mgtvnic->nesvnic = nesvnic;
                mgtvnic->mgt.qp_id = nesdev->mac_index + NES_MGT_QP_OFFSET + i;
                memset(mgt_vbase, 0, mgt_mem_size);
                nes_debug(NES_DBG_INIT, "Allocated mgt QP structures at %p (phys = %016lX), size = %u.\n",
                          mgt_vbase, (unsigned long)mgt_pbase, mgt_mem_size);

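                /*
                 * Carve up the per-QP slab: the RQ ring first, then the
                 * CQ ring, then the QP context, with the rings aligned
                 * on a 256-byte boundary (the extra 256 bytes counted
                 * into mgt_mem_size leave room for this round-up).
                 */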
                vmem = (void *)(((unsigned long)mgt_vbase + (256 - 1)) &
                                ~(unsigned long)(256 - 1));
                pmem = (dma_addr_t)(((unsigned long long)mgt_pbase + (256 - 1)) &
                                    ~(unsigned long long)(256 - 1));

                spin_lock_init(&mgtvnic->mgt.rq_lock);

                /* setup the RQ */
                mgtvnic->mgt.rq_vbase = vmem;
                mgtvnic->mgt.rq_pbase = pmem;
                mgtvnic->mgt.rq_head = 0;
                mgtvnic->mgt.rq_tail = 0;
                mgtvnic->mgt.rq_size = NES_MGT_WQ_COUNT;

                /* setup the CQ */
                vmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));
                pmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));

                mgtvnic->mgt_cq.cq_number = mgtvnic->mgt.qp_id;
                mgtvnic->mgt_cq.cq_vbase = vmem;
                mgtvnic->mgt_cq.cq_pbase = pmem;
                mgtvnic->mgt_cq.cq_head = 0;
                mgtvnic->mgt_cq.cq_size = NES_MGT_WQ_COUNT;

                mgtvnic->mgt_cq.ce_handler = nes_mgt_ce_handler;

                /* Send CreateCQ request to CQP */
                spin_lock_irqsave(&nesdev->cqp.lock, flags);
                cqp_head = nesdev->cqp.sq_head;

                cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
                nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

                cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
                        NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
                        ((u32)mgtvnic->mgt_cq.cq_size << 16));
                cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
                        mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16));
                u64temp = (u64)mgtvnic->mgt_cq.cq_pbase;
                set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
                cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
                u64temp = (unsigned long)&mgtvnic->mgt_cq;
                cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1));
                cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
                        cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
                cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;

                if (++cqp_head >= nesdev->cqp.sq_size)
                        cqp_head = 0;
                cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
                nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

                /* Send CreateQP request to CQP */
                mgt_context = (void *)(&mgtvnic->mgt_cq.cq_vbase[mgtvnic->mgt_cq.cq_size]);
                mgt_context->context_words[NES_NIC_CTX_MISC_IDX] =
                        cpu_to_le32((u32)NES_MGT_CTX_SIZE |
                                    ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12));
                nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n",
                          nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE),
                          nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE));
                if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0)
                        mgt_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE);

                u64temp = (u64)mgtvnic->mgt.rq_pbase;
                mgt_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
                mgt_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
                u64temp = (u64)mgtvnic->mgt.rq_pbase;
                mgt_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
                mgt_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));

                cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
                                                                         NES_CQP_QP_TYPE_NIC);
                cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(mgtvnic->mgt.qp_id);
                u64temp = (u64)mgtvnic->mgt_cq.cq_pbase +
                          (mgtvnic->mgt_cq.cq_size * sizeof(struct nes_hw_nic_cqe));
                set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

                if (++cqp_head >= nesdev->cqp.sq_size)
                        cqp_head = 0;
                nesdev->cqp.sq_head = cqp_head;

                barrier();

                /* Ring doorbell (2 WQEs) */
                nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);

                spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
                nes_debug(NES_DBG_INIT, "Waiting for create MGT QP%u to complete.\n",
                          mgtvnic->mgt.qp_id);

                ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
                                         NES_EVENT_TIMEOUT);
                nes_debug(NES_DBG_INIT, "Create MGT QP%u completed, wait_event_timeout ret = %u.\n",
                          mgtvnic->mgt.qp_id, ret);
                if (!ret) {
                        nes_debug(NES_DBG_INIT, "MGT QP%u create timeout expired\n", mgtvnic->mgt.qp_id);
                        if (i == 0) {
                                pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
                                                    nesvnic->mgt_pbase);
                                kfree(mgtvnic);
                        } else {
                                nes_destroy_mgt(nesvnic);
                        }
                        return -EIO;
                }

                /* Populate the RQ */
                for (counter = 0; counter < (NES_MGT_WQ_COUNT - 1); counter++) {
                        skb = dev_alloc_skb(nesvnic->max_frame_size);
                        if (!skb) {
                                nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name);
                                return -ENOMEM;
                        }

                        skb->dev = netdev;

                        pmem = pci_map_single(nesdev->pcidev, skb->data,
                                              nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
                        cb = (struct nes_rskb_cb *)&skb->cb[0];
                        cb->busaddr = pmem;
                        cb->maplen = nesvnic->max_frame_size;

                        mgt_rqe = &mgtvnic->mgt.rq_vbase[counter];
                        mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32((u32)nesvnic->max_frame_size);
                        mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
                        mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
                        mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
                        mgtvnic->mgt.rx_skb[counter] = skb;
                }

                init_timer(&mgtvnic->rq_wqes_timer);
                mgtvnic->rq_wqes_timer.function = nes_mgt_rq_wqes_timeout;
                mgtvnic->rq_wqes_timer.data = (unsigned long)mgtvnic;

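                /*
                 * Hand the freshly posted receive WQEs to the adapter;
                 * the WQE_ALLOC doorbell carries the WQE count in its
                 * top byte, so ring it in batches of at most 255.
                 */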
                wqe_count = NES_MGT_WQ_COUNT - 1;
                mgtvnic->mgt.rq_head = wqe_count;
                barrier();
                do {
                        counter = min(wqe_count, ((u32)255));
                        wqe_count -= counter;
                        nes_write32(nesdev->regs + NES_WQE_ALLOC, (counter << 24) | mgtvnic->mgt.qp_id);
                } while (wqe_count);

                nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
                            mgtvnic->mgt_cq.cq_number);
                nes_read32(nesdev->regs + NES_CQE_ALLOC);

                mgt_vbase += mgt_mem_size;
                mgt_pbase += mgt_mem_size;

                nesvnic->mgtvnic[i] = mgtvnic++;
        }
        return 0;
}

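/**
 * nes_destroy_mgt - stop the mgt thread and tear down the mgt QPs/CQs
 */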
void nes_destroy_mgt(struct nes_vnic *nesvnic)
{
        struct nes_device *nesdev = nesvnic->nesdev;
        struct nes_vnic_mgt *mgtvnic;
        struct nes_vnic_mgt *first_mgtvnic;
        unsigned long flags;
        struct nes_hw_cqp_wqe *cqp_wqe;
        u32 cqp_head;
        struct sk_buff *rx_skb;
        int i;
        int ret;

        kthread_stop(nesvnic->mgt_thread);

        /* Free remaining NIC receive buffers */
        first_mgtvnic = nesvnic->mgtvnic[0];
        for (i = 0; i < NES_MGT_QP_COUNT; i++) {
                mgtvnic = nesvnic->mgtvnic[i];
                if (mgtvnic == NULL)
                        continue;

                while (mgtvnic->mgt.rq_head != mgtvnic->mgt.rq_tail) {
                        rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
                        nes_mgt_free_skb(nesdev, rx_skb, PCI_DMA_FROMDEVICE);
                        mgtvnic->mgt.rq_tail++;
                        mgtvnic->mgt.rq_tail &= (mgtvnic->mgt.rq_size - 1);
                }

                spin_lock_irqsave(&nesdev->cqp.lock, flags);

                /* Destroy NIC QP */
                cqp_head = nesdev->cqp.sq_head;
                cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
                nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
                                    (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC));
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
                                    mgtvnic->mgt.qp_id);

                if (++cqp_head >= nesdev->cqp.sq_size)
                        cqp_head = 0;

                cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];

                /* Destroy NIC CQ */
                nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
                                    (NES_CQP_DESTROY_CQ | ((u32)mgtvnic->mgt_cq.cq_size << 16)));
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
                                    (mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16)));

                if (++cqp_head >= nesdev->cqp.sq_size)
                        cqp_head = 0;

                nesdev->cqp.sq_head = cqp_head;
                barrier();

                /* Ring doorbell (2 WQEs) */
                nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);

                spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
                nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u,"
                          " cqp.sq_tail=%u, cqp.sq_size=%u\n",
                          cqp_head, nesdev->cqp.sq_head,
                          nesdev->cqp.sq_tail, nesdev->cqp.sq_size);

                ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
                                         NES_EVENT_TIMEOUT);

                nes_debug(NES_DBG_SHUTDOWN, "Destroy MGT QP returned, wait_event_timeout ret = %u, cqp_head=%u,"
                          " cqp.sq_head=%u, cqp.sq_tail=%u\n",
                          ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
                if (!ret)
                        nes_debug(NES_DBG_SHUTDOWN, "MGT QP%u destroy timeout expired\n",
                                  mgtvnic->mgt.qp_id);

                nesvnic->mgtvnic[i] = NULL;
        }

        if (nesvnic->mgt_vbase) {
                pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
                                    nesvnic->mgt_pbase);
                nesvnic->mgt_vbase = NULL;
                nesvnic->mgt_pbase = 0;
        }

        kfree(first_mgtvnic);
}