cxgbit_cm.c

/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"
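
/*
 * Helpers for synchronous CPL requests: callers initialise a
 * cxgbit_wr_wait, post the work request, then sleep in
 * cxgbit_wait_for_reply() until the firmware reply handler calls
 * cxgbit_wake_up() with the CPL status.
 */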
static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
	if (ret == CPL_ERR_NONE)
		wr_waitp->ret = 0;
	else
		wr_waitp->ret = -EIO;

	if (wr_waitp->ret)
		pr_err("%s: err:%u", func, ret);

	complete(&wr_waitp->completion);
}

static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
		      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
		      const char *func)
{
	int ret;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
	if (!ret) {
		pr_info("%s - Device %s not responding tid %u\n",
			func, pci_name(cdev->lldi.pdev), tid);
		wr_waitp->ret = -ETIMEDOUT;
	}
out:
	if (wr_waitp->ret)
		pr_info("%s: FW reply %d tid %u\n",
			pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
	return wr_waitp->ret;
}
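
/*
 * Per-device hash table mapping a listening endpoint (cxgbit_np) to the
 * server TID (stid) allocated for it on that adapter: a simple chained
 * hash keyed on the cnp pointer, protected by cdev->np_lock.
 */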
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
	return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
		   unsigned int stid)
{
	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int bucket = cxgbit_np_hashfn(cnp);

		p->cnp = cnp;
		p->stid = stid;
		spin_lock(&cdev->np_lock);
		p->next = cdev->np_hash_tab[bucket];
		cdev->np_hash_tab[bucket] = p;
		spin_unlock(&cdev->np_lock);
	}

	return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p;

	spin_lock(&cdev->np_lock);
	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

	spin_lock(&cdev->np_lock);
	for (p = *prev; p; prev = &p->next, p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
	struct cxgbit_np *cnp;

	cnp = container_of(kref, struct cxgbit_np, kref);
	kfree(cnp);
}
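
/*
 * Start a hardware listening server for an IPv6 address: a CLIP
 * (Compressed Local IP) table entry is claimed for any non-wildcard
 * address before the server request is issued, and the call then blocks
 * until the CPL_PASS_OPEN_RPL arrives (see cxgbit_pass_open_rpl()).
 */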
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &cnp->com.local_addr;
	int addr_type;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

	addr_type = ipv6_addr_type((const struct in6_addr *)
				   &sin6->sin6_addr);
	if (addr_type != IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(cdev->lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (ret) {
			pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
			       sin6->sin6_addr.s6_addr, ret);
			return -ENOMEM;
		}
	}

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server6(cdev->lldi.ports[0],
				   stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_clip_release(cdev->lldi.ports[0],
					   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

		pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
		       ret, stid, sin6->sin6_addr.s6_addr,
		       ntohs(sin6->sin6_port));
	}

	return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &cnp->com.local_addr;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server(cdev->lldi.ports[0],
				  stid, sin->sin_addr.s_addr,
				  sin->sin_port, 0,
				  cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev,
					    &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret)
		pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
		       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
	return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
	struct cxgbit_device *cdev;
	u8 i;

	list_for_each_entry(cdev, &cdev_list_head, list) {
		struct cxgb4_lld_info *lldi = &cdev->lldi;

		for (i = 0; i < lldi->nports; i++) {
			if (lldi->ports[i] == ndev) {
				if (port_id)
					*port_id = i;
				return cdev;
			}
		}
	}

	return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
	if (ndev->priv_flags & IFF_BONDING) {
		pr_err("Bond devices are not supported. Interface:%s\n",
		       ndev->name);
		return NULL;
	}

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);

	return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
	struct net_device *ndev;

	ndev = __ip_dev_find(&init_net, saddr, false);
	if (!ndev)
		return NULL;

	return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
	struct net_device *ndev = NULL;
	bool found = false;

	if (IS_ENABLED(CONFIG_IPV6)) {
		for_each_netdev_rcu(&init_net, ndev)
			if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
				found = true;
				break;
			}
	}
	if (!found)
		return NULL;
	return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	struct net_device *ndev = NULL;
	struct cxgbit_device *cdev = NULL;

	rcu_read_lock();
	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
	}
	if (!ndev)
		goto out;

	cdev = cxgbit_find_device(ndev, NULL);
out:
	rcu_read_unlock();
	return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	int addr_type;

	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
			return true;
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		addr_type = ipv6_addr_type((const struct in6_addr *)
					   &sin6->sin6_addr);
		if (addr_type == IPV6_ADDR_ANY)
			return true;
	}
	return false;
}

static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	int ss_family = cnp->com.local_addr.ss_family;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
		cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
		return -EINVAL;
	}

	if (ss_family == AF_INET)
		ret = cxgbit_create_server4(cdev, stid, cnp);
	else
		ret = cxgbit_create_server6(cdev, stid, cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_free_stid(cdev->lldi.tids, stid,
					ss_family);
		cxgbit_np_hash_del(cdev, cnp);
		return ret;
	}
	return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret = -1;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_np_cdev(cnp);
	if (!cdev)
		goto out;

	if (cxgbit_np_hash_find(cdev, cnp) >= 0)
		goto out;

	if (__cxgbit_setup_cdev_np(cdev, cnp))
		goto out;

	cnp->com.cdev = cdev;
	ret = 0;
out:
	mutex_unlock(&cdev_list_lock);
	return ret;
}
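
/*
 * Wildcard (INADDR_ANY/in6addr_any) listeners are replicated on every
 * registered adapter; the listen succeeds if at least one adapter
 * accepted it, and an -ETIMEDOUT from any adapter aborts the loop since
 * the firmware has stopped responding.
 */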
static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;
	u32 count = 0;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
			mutex_unlock(&cdev_list_lock);
			return -1;
		}
	}

	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_setup_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
		if (ret != 0)
			continue;
		count++;
	}
	mutex_unlock(&cdev_list_lock);

	return count ? 0 : -1;
}

int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
	struct cxgbit_np *cnp;
	int ret;

	if ((ksockaddr->ss_family != AF_INET) &&
	    (ksockaddr->ss_family != AF_INET6))
		return -EINVAL;

	cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
	if (!cnp)
		return -ENOMEM;

	init_waitqueue_head(&cnp->accept_wait);
	init_completion(&cnp->com.wr_wait.completion);
	init_completion(&cnp->accept_comp);
	INIT_LIST_HEAD(&cnp->np_accept_list);
	spin_lock_init(&cnp->np_accept_lock);

	kref_init(&cnp->kref);
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));
	memcpy(&cnp->com.local_addr, &np->np_sockaddr,
	       sizeof(cnp->com.local_addr));

	cnp->np = np;
	cnp->com.cdev = NULL;

	if (cxgbit_inaddr_any(cnp))
		ret = cxgbit_setup_all_np(cnp);
	else
		ret = cxgbit_setup_cdev_np(cnp);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return -EINVAL;
	}

	np->np_context = cnp;
	cnp->com.state = CSK_STATE_LISTEN;
	return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		     struct cxgbit_sock *csk)
{
	conn->login_family = np->np_sockaddr.ss_family;
	conn->login_sockaddr = csk->com.remote_addr;
	conn->local_sockaddr = csk->com.local_addr;
}
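
/*
 * Called from the iSCSI np thread: sleep until cxgbit_pass_establish()
 * queues a fully established connection on np_accept_list and completes
 * accept_comp, then hand the first queued cxgbit_sock to the connection.
 */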
int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk;
	int ret = 0;

accept_wait:
	ret = wait_for_completion_interruptible(&cnp->accept_comp);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	spin_lock_bh(&cnp->np_accept_lock);
	if (list_empty(&cnp->np_accept_list)) {
		spin_unlock_bh(&cnp->np_accept_lock);
		goto accept_wait;
	}

	csk = list_first_entry(&cnp->np_accept_list,
			       struct cxgbit_sock,
			       accept_node);

	list_del_init(&csk->accept_node);
	spin_unlock_bh(&cnp->np_accept_lock);
	conn->context = csk;
	csk->conn = conn;

	cxgbit_set_conn_info(np, conn, csk);
	return 0;
}
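
/*
 * Tear down one adapter's listening server: drop the stid from the np
 * hash, send the close-server request and wait for the reply, then
 * release the CLIP entry (IPv6 only) and free the stid.  On -ETIMEDOUT
 * the stid is not freed, since the firmware state is unknown.
 */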
static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	bool ipv6 = false;

	stid = cxgbit_np_hash_del(cdev, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	if (cnp->np->np_sockaddr.ss_family == AF_INET6)
		ipv6 = true;

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);
	ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
				  cdev->lldi.rxq_ids[0], ipv6);

	if (ret > 0)
		ret = net_xmit_errno(ret);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return ret;
	}

	ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
				    0, 10, __func__);
	if (ret == -ETIMEDOUT)
		return ret;

	if (ipv6 && cnp->com.cdev) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
		cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr,
				   1);
	}

	cxgb4_free_stid(cdev->lldi.tids, stid,
			cnp->com.local_addr.ss_family);
	return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_free_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
	}
	mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	bool found = false;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cdev == cnp->com.cdev) {
			found = true;
			break;
		}
	}

	if (!found)
		goto out;

	__cxgbit_free_cdev_np(cdev, cnp);
out:
	mutex_unlock(&cdev_list_lock);
}

void cxgbit_free_np(struct iscsi_np *np)
{
	struct cxgbit_np *cnp = np->np_context;

	cnp->com.state = CSK_STATE_DEAD;
	if (cnp->com.cdev)
		cxgbit_free_cdev_np(cnp);
	else
		cxgbit_free_all_np(cnp);

	np->np_context = NULL;
	cxgbit_put_cnp(cnp);
}
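
/*
 * Connection shutdown paths: cxgbit_send_halfclose() performs an orderly
 * half-close by queueing a CPL_CLOSE_CON_REQ (TCP FIN) on the tx queue,
 * while cxgbit_send_abort_req() purges pending tx data and sends a
 * CPL_ABORT_REQ (TCP RST) using one of the pre-allocated skbs on skbq.
 */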
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
			      NULL, NULL);

	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_debug("%s cxgbit_device %p\n", __func__, handle);
	kfree_skb(skb);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbit_device *cdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("%s cdev %p\n", __func__, cdev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_abort_req), 16);

	pr_debug("%s: csk %p tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	skb = __skb_dequeue(&csk->skbq);
	cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
			  csk->com.cdev, cxgbit_abort_arp_failure);

	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	bool release = false;

	pr_debug("%s: state %d\n",
		 __func__, csk->com.state);

	spin_lock_bh(&csk->lock);
	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
			csk->com.state = CSK_STATE_CLOSING;
			cxgbit_send_halfclose(csk);
		} else {
			csk->com.state = CSK_STATE_ABORTING;
			cxgbit_send_abort_req(csk);
		}
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_MORIBUND;
		cxgbit_send_halfclose(csk);
		break;
	case CSK_STATE_DEAD:
		release = true;
		break;
	default:
		pr_err("%s: csk %p; state %d\n",
		       __func__, csk, csk->com.state);
	}
	spin_unlock_bh(&csk->lock);

	if (release)
		cxgbit_put_csk(csk);
}
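
/*
 * Derive the effective MSS from the firmware's negotiated MTU index,
 * subtracting IP/TCP header overhead (and timestamp option space when
 * negotiated), and clamp it to a 128-byte minimum.
 */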
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
		    ((csk->com.remote_addr.ss_family == AF_INET) ?
		     sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		    sizeof(struct tcphdr);
	csk->mss = csk->emss;
	if (TCPOPT_TSTAMP_G(opt))
		csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (csk->emss < 128)
		csk->emss = 128;
	if (csk->emss & 7)
		pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			TCPOPT_MSS_G(opt), csk->mss, csk->emss);
	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
		 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	__skb_queue_purge(&csk->txq);
	__skb_queue_purge(&csk->rxq);
	__skb_queue_purge(&csk->backlogq);
	__skb_queue_purge(&csk->ppodq);
	__skb_queue_purge(&csk->skbq);

	while ((skb = cxgbit_sock_dequeue_wr(csk)))
		kfree_skb(skb);

	__kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
	struct cxgbit_sock *csk;
	struct cxgbit_device *cdev;

	csk = container_of(kref, struct cxgbit_sock, kref);

	pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

	if (csk->com.local_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &csk->com.local_addr;
		cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
				   (const u32 *)
				   &sin6->sin6_addr.s6_addr, 1);
	}

	cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
			 csk->com.local_addr.ss_family);
	dst_release(csk->dst);
	cxgb4_l2t_release(csk->l2t);

	cdev = csk->com.cdev;
	spin_lock_bh(&cdev->cskq.lock);
	list_del(&csk->list);
	spin_unlock_bh(&cdev->cskq.lock);

	cxgbit_free_skb(csk);
	cxgbit_put_cdev(cdev);

	kfree(csk);
}
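
/*
 * Scale the 256KB baseline send/receive windows linearly with link
 * speed: a 10G port keeps 256KB, a 40G port gets 4x that, and so on,
 * so the default window grows with port speed.
 */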
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
	csk->rcv_win = CXGBIT_10G_RCV_WIN;
	if (scale)
		csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
	csk->snd_win = CXGBIT_10G_SND_WIN;
	if (scale)
		csk->snd_win *= scale;

	pr_debug("%s snd_win %d rcv_win %d\n",
		 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;

	return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
	int ret;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = local_port
	};

	ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
	if (ret)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
		ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
		ret = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

	return cxgbit_select_priority(ret);
}
#endif
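
/*
 * Resolve the L2 path for a new connection and bind it to adapter
 * resources: an L2T entry, tx channel, SMT index, and tx/rx queue
 * indices spread round-robin across the queues of the ingress port.
 * Loopback connections are mapped back onto the local port's queues.
 */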
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
		    u16 local_port, struct dst_entry *dst,
		    struct cxgbit_device *cdev)
{
	struct neighbour *n;
	int ret, step;
	struct net_device *ndev;
	u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	ret = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
		else
			ndev = NULL;

		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
					 n, ndev, 0);
		if (!csk->l2t)
			goto out;
		csk->mtu = ndev->mtu;
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq /
		       cdev->lldi.nchan;
		csk->txq_idx = cxgb4_port_idx(ndev) * step;
		step = cdev->lldi.nrxq /
		       cdev->lldi.nchan;
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		csk->rss_qid = cdev->lldi.rxq_ids[
				cxgb4_port_idx(ndev) * step];
		csk->port_id = cxgb4_port_idx(ndev);
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	} else {
		ndev = cxgbit_get_real_dev(n->dev);
		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

#ifdef CONFIG_CHELSIO_T4_DCB
		if (cxgbit_get_iscsi_dcb_state(ndev))
			priority = cxgbit_get_iscsi_dcb_priority(ndev,
								 local_port);

		csk->dcb_priority = priority;

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
		if (!csk->l2t)
			goto out;
		port_id = cxgb4_port_idx(ndev);
		csk->mtu = dst_mtu(dst);
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq /
		       cdev->lldi.nports;
		csk->txq_idx = (port_id * step) +
			       (cdev->selectq[port_id][0]++ % step);
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		step = cdev->lldi.nrxq /
		       cdev->lldi.nports;
		rxq_idx = (port_id * step) +
			  (cdev->selectq[port_id][1]++ % step);
		csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
		csk->port_id = port_id;
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	}
	ret = 0;
out:
	rcu_read_unlock();
	neigh_release(n);
	return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, tid, 0);
	cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
		struct l2t_entry *l2e)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	if (csk->com.state != CSK_STATE_ESTABLISHED) {
		__kfree_skb(skb);
		return;
	}

	cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.
 * Returns the number of credits sent.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -1;

	credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
		      RX_CREDITS_V(csk->rx_credits);

	cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
			    credit_dack);

	csk->rx_credits = 0;

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock_bh(&csk->lock);
		return 0;
	}

	cxgbit_send_rx_credits(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}
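
/*
 * Pre-allocate the per-connection skbs used from atomic contexts: three
 * skbs on skbq sized for the largest of a FLOWC work request (up to
 * FLOWC_WR_NPARAMS_MAX parameters), an abort request and an abort reply,
 * plus one zeroed header skb reserved for LRO coalescing.
 */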
#define FLOWC_WR_NPARAMS_MIN	9
#define FLOWC_WR_NPARAMS_MAX	11

static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len, flowclen;
	u8 i;

	flowclen = offsetof(struct fw_flowc_wr,
			    mnemval[FLOWC_WR_NPARAMS_MAX]);

	len = max_t(u32, sizeof(struct cpl_abort_req),
		    sizeof(struct cpl_abort_rpl));

	len = max(len, flowclen);
	len = roundup(len, 16);

	for (i = 0; i < 3; i++) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (!skb)
			goto out;
		__skb_queue_tail(&csk->skbq, skb);
	}

	skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
	if (!skb)
		goto out;

	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
	csk->lro_hskb = skb;

	return 0;
out:
	__skb_queue_purge(&csk->skbq);
	return -ENOMEM;
}
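
/*
 * Build and send the CPL_PASS_ACCEPT_RPL that completes the passive
 * open: opt0/opt2 encode the negotiated MSS index, window scale, initial
 * receive window (capped at RCV_BUFSIZ_M * 1KB; the remainder is granted
 * later via RX_DATA_ACK credits), ECN, RX coalescing and the iSCSI ULP
 * mode.
 */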
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
	struct sk_buff *skb;
	const struct tcphdr *tcph;
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	unsigned int len = roundup(sizeof(*rpl5), 16);
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2, hlen;
	u32 wscale;
	u32 win;

	pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		cxgbit_put_csk(csk);
		return;
	}

	rpl5 = __skb_put_zero(skb, len);

	INIT_TP_WR(rpl5, csk->tid);
	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
		      req->tcpopt.tstamp,
		      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(csk->rcv_win);
	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = csk->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 =  TCAM_BYPASS_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(csk->l2t->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		DSCP_V(csk->tos >> 2) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(win);

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;

	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale)
		opt2 |= WND_SCALE_EN_F;

	hlen = ntohl(req->hdr_len);

	if (is_t5(lldi->adapter_type))
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
	else
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);

	opt2 |= RX_COALESCE_V(3);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

	opt2 |= T5_ISS_F;
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

	opt2 |= T5_OPT_2_VALID_F;

	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk = NULL;
	struct cxgbit_np *cnp;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	u16 peer_mss = ntohs(req->tcpopt.mss);
	unsigned short hdrs;

	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int ret;
	int iptype;

	pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
		 __func__, cdev, stid, tid);

	cnp = lookup_stid(t, stid);
	if (!cnp) {
		pr_err("%s connect request on invalid stid %d\n",
		       __func__, stid);
		goto rel_skb;
	}

	if (cnp->com.state != CSK_STATE_LISTEN) {
		pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
		       __func__);
		goto reject;
	}

	csk = lookup_tid(t, tid);
	if (csk) {
		pr_err("%s csk not null tid %u\n",
		       __func__, tid);
		goto rel_skb;
	}

	cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
			peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
				      *(__be32 *)local_ip,
				      *(__be32 *)peer_ip,
				      local_port, peer_port,
				      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
	} else {
		pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
				       local_ip, peer_ip,
				       local_port, peer_port,
				       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				       ((struct sockaddr_in6 *)
					&cnp->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
	if (!csk) {
		dst_release(dst);
		goto rel_skb;
	}

	ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
				  dst, cdev);
	if (ret) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(csk);
		goto reject;
	}

	kref_init(&csk->kref);
	init_completion(&csk->com.wr_wait.completion);

	INIT_LIST_HEAD(&csk->accept_node);

	hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
		sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
	if (peer_mss && csk->mtu > (peer_mss + hdrs))
		csk->mtu = peer_mss + hdrs;

	csk->com.state = CSK_STATE_CONNECTING;
	csk->com.cdev = cdev;
	csk->cnp = cnp;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	csk->dst = dst;
	csk->tid = tid;
	csk->wr_cred = cdev->lldi.wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
					  &csk->com.local_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&csk->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &csk->com.local_addr;

		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
		cxgb4_clip_get(cdev->lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr,
			       1);

		sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	skb_queue_head_init(&csk->rxq);
	skb_queue_head_init(&csk->txq);
	skb_queue_head_init(&csk->ppodq);
	skb_queue_head_init(&csk->backlogq);
	skb_queue_head_init(&csk->skbq);
	cxgbit_sock_reset_wr_list(csk);
	spin_lock_init(&csk->lock);
	init_waitqueue_head(&csk->waitq);
	init_waitqueue_head(&csk->ack_waitq);
	csk->lock_owner = false;

	if (cxgbit_alloc_csk_skb(csk)) {
		dst_release(dst);
		kfree(csk);
		goto rel_skb;
	}

	cxgbit_get_cdev(cdev);

	spin_lock(&cdev->cskq.lock);
	list_add_tail(&csk->list, &cdev->cskq.list);
	spin_unlock(&cdev->cskq.lock);

	cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
	cxgbit_pass_accept_rpl(csk, req);
	goto rel_skb;

reject:
	cxgbit_release_tid(cdev, tid);
rel_skb:
	__kfree_skb(skb);
}

static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
			   u32 *flowclenp)
{
	u32 nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;

	if (csk->snd_wscale)
		nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the flowc request.
	 * Pass back the nparams and actual flowc length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;
	return flowclen16;
}

u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct fw_flowc_wr *flowc;
	u32 nparams, flowclen16, flowclen;
	struct sk_buff *skb;
	u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

	flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

	skb = __skb_dequeue(&csk->skbq);
	flowc = __skb_put_zero(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (csk->com.cdev->lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(csk->emss);

	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
		flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
	else
		flowc->mnemval[8].val = cpu_to_be32(16384);

	index = 9;

	if (csk->snd_wscale) {
		flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
		flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
		index++;
	}

#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == VLAN_NONE) {
		pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
		flowc->mnemval[index].val = cpu_to_be32(0);
	} else
		flowc->mnemval[index].val = cpu_to_be32(
				(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

	pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
		 " rcv_seq = %u; snd_win = %u; emss = %u\n",
		 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
		 csk->rcv_nxt, csk->snd_win, csk->emss);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, skb);
	return flowclen16;
}

int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
	u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode */
	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}

int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}

static void
cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		return;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
}

static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		return;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
}

static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	struct cxgbit_sock *csk;
	struct cxgbit_np *cnp;
	u16 tcp_opt = be16_to_cpu(req->tcp_opt);
	u32 snd_isn = be32_to_cpu(req->snd_isn);
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	cnp = csk->cnp;

	pr_debug("%s: csk %p; tid %u; cnp %p\n",
		 __func__, csk, tid, cnp);

	csk->write_seq = snd_isn;
	csk->snd_una = snd_isn;
	csk->snd_nxt = snd_isn;
	csk->rcv_nxt = rcv_isn;

	if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
		csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));

	csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
	cxgbit_set_emss(csk, tcp_opt);
	dst_confirm(csk->dst);
	csk->com.state = CSK_STATE_ESTABLISHED;
	spin_lock_bh(&cnp->np_accept_lock);
	list_add_tail(&csk->accept_node, &cnp->np_accept_list);
	spin_unlock_bh(&cnp->np_accept_lock);
	complete(&cnp->accept_comp);
rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_skcb_flags(skb) = 0;
	spin_lock_bh(&csk->rxq.lock);
	__skb_queue_tail(&csk->rxq, skb);
	spin_unlock_bh(&csk->rxq.lock);
	wake_up(&csk->waitq);
}

static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		csk->com.state = CSK_STATE_CLOSING;
		cxgbit_queue_rx_skb(csk, skb);
		return;
	case CSK_STATE_CLOSING:
		/* simultaneous close */
		csk->com.state = CSK_STATE_MORIBUND;
		break;
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		cxgbit_put_csk(csk);
		break;
	case CSK_STATE_ABORTING:
		break;
	default:
		pr_info("%s: cpl_peer_close in bad state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}

static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_MORIBUND;
		break;
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		cxgbit_put_csk(csk);
		break;
	case CSK_STATE_ABORTING:
	case CSK_STATE_DEAD:
		break;
	default:
		pr_info("%s: cpl_close_con_rpl in bad state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}

static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *hdr = cplhdr(skb);
	unsigned int tid = GET_TID(hdr);
	struct sk_buff *rpl_skb;
	bool release = false;
	bool wakeup_thread = false;
	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, tid, csk->com.state);

	if (cxgb_is_neg_adv(hdr->status)) {
		pr_err("%s: got neg advise %d on tid %u\n",
		       __func__, hdr->status, tid);
		goto rel_skb;
	}

	switch (csk->com.state) {
	case CSK_STATE_CONNECTING:
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		release = true;
		break;
	case CSK_STATE_ESTABLISHED:
		csk->com.state = CSK_STATE_DEAD;
		wakeup_thread = true;
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_DEAD;
		if (!csk->conn)
			release = true;
		break;
	case CSK_STATE_ABORTING:
		break;
	default:
		pr_info("%s: cpl_abort_req_rss in bad state %d\n",
			__func__, csk->com.state);
		csk->com.state = CSK_STATE_DEAD;
	}

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	rpl_skb = __skb_dequeue(&csk->skbq);

	cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, rpl_skb);

	if (wakeup_thread) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (release)
		cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_ABORTING:
		csk->com.state = CSK_STATE_DEAD;
		cxgbit_put_csk(csk);
		break;
	default:
		pr_info("%s: cpl_abort_rpl_rss in state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}

static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
	const struct sk_buff *skb = csk->wr_pending_head;
	u32 credit = 0;

	if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u > %u\n",
		       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
		return true;
	}

	while (skb) {
		credit += (__force u32)skb->csum;
		skb = cxgbit_skcb_tx_wr_next(skb);
	}

	if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
		       csk, csk->tid, csk->wr_cred,
		       credit, csk->wr_max_cred);
		return true;
	}

	return false;
}
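
/*
 * CPL_FW4_ACK: the firmware returns 16-byte tx credits as work requests
 * complete.  Each pending skb's csum field caches the credits its WR
 * consumed, so fully acked WRs are popped from the pending list until
 * the acked credits are used up; a partially acked WR has its cached
 * count reduced in place.  A valid snd_una also advances the tx window.
 */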
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
	u32 credits = rpl->credits;
	u32 snd_una = ntohl(rpl->snd_una);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbit_sock_peek_wr(csk);
		u32 csum;

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
			       csk, csk->tid, credits,
			       csk->wr_cred, csk->wr_una_cred);
			break;
		}

		/* Read the cached credit count only after the NULL check. */
		csum = (__force u32)p->csum;
		if (unlikely(credits < csum)) {
			pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				csum);
			p->csum = (__force __wsum)(csum - credits);
			break;
		}

		cxgbit_sock_dequeue_wr(csk);
		credits -= csum;
		kfree_skb(p);
	}

	if (unlikely(cxgbit_credit_err(csk))) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u, snd_una %u/%u.",
				csk, csk->tid, snd_una,
				csk->snd_una);
			goto rel_skb;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
			wake_up(&csk->ack_waitq);
		}
	}

	if (skb_queue_len(&csk->txq))
		cxgbit_push_tx_frames(csk);

rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		return;
	}

	cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
	/* drop the reference taken when the SET_TCB_FIELD request was sent */
	cxgbit_put_csk(csk);
}

static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_rx_data *cpl = cplhdr(skb);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	cxgbit_queue_rx_skb(csk, skb);
	return;
rel_skb:
	__kfree_skb(skb);
}

static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	spin_lock(&csk->lock);
	if (csk->lock_owner) {
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock(&csk->lock);
		return;
	}

	cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
	spin_unlock(&csk->lock);
}

static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_get_csk(csk);
	__cxgbit_process_rx_cpl(csk, skb);
	cxgbit_put_csk(csk);
}
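
/*
 * Demultiplex connection-scoped CPL messages: stash the per-opcode
 * handler in the skb control block and run it under csk->lock, deferring
 * to the backlog queue if the socket is currently owned by the rx
 * thread.  CPL_FW4_ACK skips the extra csk reference because its handler
 * never drops a connection reference, unlike the close/abort handlers.
 */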
static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_tx_data *cpl = cplhdr(skb);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;
	unsigned int tid = GET_TID(cpl);
	u8 opcode = cxgbit_skcb_rx_opcode(skb);
	bool ref = true;

	switch (opcode) {
	case CPL_FW4_ACK:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
		ref = false;
		break;
	case CPL_PEER_CLOSE:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	if (ref)
		cxgbit_process_rx_cpl(csk, skb);
	else
		__cxgbit_process_rx_cpl(csk, skb);

	return;
rel_skb:
	__kfree_skb(skb);
}

cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL]	= cxgbit_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL]	= cxgbit_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]	= cxgbit_pass_accept_req,
	[CPL_PASS_ESTABLISH]	= cxgbit_pass_establish,
	[CPL_SET_TCB_RPL]	= cxgbit_set_tcb_rpl,
	[CPL_RX_DATA]		= cxgbit_rx_data,
	[CPL_FW4_ACK]		= cxgbit_rx_cpl,
	[CPL_PEER_CLOSE]	= cxgbit_rx_cpl,
	[CPL_CLOSE_CON_RPL]	= cxgbit_rx_cpl,
	[CPL_ABORT_REQ_RSS]	= cxgbit_rx_cpl,
	[CPL_ABORT_RPL_RSS]	= cxgbit_rx_cpl,
};