cxgbit_cm.c

/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
	if (ret == CPL_ERR_NONE)
		wr_waitp->ret = 0;
	else
		wr_waitp->ret = -EIO;

	if (wr_waitp->ret)
		pr_err("%s: err:%u", func, ret);

	complete(&wr_waitp->completion);
}
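/*
 * Block until the firmware completion tracked by @wr_waitp fires, or
 * until @timeout seconds pass. Returns 0 on success, -EIO if the device
 * is down, -ETIMEDOUT if the firmware never answered.
 */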
static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
		      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
		      const char *func)
{
	int ret;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
	if (!ret) {
		pr_info("%s - Device %s not responding tid %u\n",
			func, pci_name(cdev->lldi.pdev), tid);
		wr_waitp->ret = -ETIMEDOUT;
	}
out:
	if (wr_waitp->ret)
		pr_info("%s: FW reply %d tid %u\n",
			pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
	return wr_waitp->ret;
}
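/*
 * Each listening endpoint (cxgbit_np) is mapped to its hardware server
 * TID (stid) via a small per-device hash table keyed on the cnp pointer
 * and protected by cdev->np_lock.
 */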
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
	return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
		   unsigned int stid)
{
	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int bucket = cxgbit_np_hashfn(cnp);

		p->cnp = cnp;
		p->stid = stid;
		spin_lock(&cdev->np_lock);
		p->next = cdev->np_hash_tab[bucket];
		cdev->np_hash_tab[bucket] = p;
		spin_unlock(&cdev->np_lock);
	}

	return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p;

	spin_lock(&cdev->np_lock);
	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

	spin_lock(&cdev->np_lock);
	for (p = *prev; p; prev = &p->next, p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
	struct cxgbit_np *cnp;

	cnp = container_of(kref, struct cxgbit_np, kref);
	kfree(cnp);
}
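/*
 * Program an IPv6 hardware listening server on @stid. For non-wildcard
 * addresses a CLIP (Compressed Local IP) entry is claimed first and
 * released again on failure, except on timeout when the firmware reply
 * may still be outstanding.
 */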
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &cnp->com.local_addr;
	int addr_type;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

	addr_type = ipv6_addr_type((const struct in6_addr *)
				   &sin6->sin6_addr);
	if (addr_type != IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(cdev->lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (ret) {
			pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
			       sin6->sin6_addr.s6_addr, ret);
			return -ENOMEM;
		}
	}

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server6(cdev->lldi.ports[0],
				   stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_clip_release(cdev->lldi.ports[0],
					   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

		pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
		       ret, stid, sin6->sin6_addr.s6_addr,
		       ntohs(sin6->sin6_port));
	}

	return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &cnp->com.local_addr;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server(cdev->lldi.ports[0],
				  stid, sin->sin_addr.s_addr,
				  sin->sin_port, 0,
				  cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev,
					    &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret)
		pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
		       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
	return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
	struct cxgbit_device *cdev;
	u8 i;

	list_for_each_entry(cdev, &cdev_list_head, list) {
		struct cxgb4_lld_info *lldi = &cdev->lldi;

		for (i = 0; i < lldi->nports; i++) {
			if (lldi->ports[i] == ndev) {
				if (port_id)
					*port_id = i;
				return cdev;
			}
		}
	}

	return NULL;
}
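/*
 * Map a possibly-virtual netdev onto the underlying Chelsio port:
 * VLAN devices resolve to their real device, bonding is rejected.
 */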
static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
	if (ndev->priv_flags & IFF_BONDING) {
		pr_err("Bond devices are not supported. Interface:%s\n",
		       ndev->name);
		return NULL;
	}

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);

	return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
	struct net_device *ndev;

	ndev = __ip_dev_find(&init_net, saddr, false);
	if (!ndev)
		return NULL;

	return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
	struct net_device *ndev = NULL;
	bool found = false;

	if (IS_ENABLED(CONFIG_IPV6)) {
		for_each_netdev_rcu(&init_net, ndev)
			if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
				found = true;
				break;
			}
	}
	if (!found)
		return NULL;
	return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	struct net_device *ndev = NULL;
	struct cxgbit_device *cdev = NULL;

	rcu_read_lock();
	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
	}
	if (!ndev)
		goto out;

	cdev = cxgbit_find_device(ndev, NULL);
out:
	rcu_read_unlock();
	return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	int addr_type;

	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
			return true;
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		addr_type = ipv6_addr_type((const struct in6_addr *)
					   &sin6->sin6_addr);
		if (addr_type == IPV6_ADDR_ANY)
			return true;
	}
	return false;
}
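/*
 * Set up a listening server on a single adapter: allocate an stid,
 * record the cnp->stid mapping, then program the IPv4 or IPv6 hardware
 * server. On failure the mapping is removed; the stid itself is only
 * freed when the failure was not a timeout, since on -ETIMEDOUT the
 * firmware may still deliver a reply for that stid.
 */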
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	int ss_family = cnp->com.local_addr.ss_family;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
		cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
		return -EINVAL;
	}

	if (ss_family == AF_INET)
		ret = cxgbit_create_server4(cdev, stid, cnp);
	else
		ret = cxgbit_create_server6(cdev, stid, cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_free_stid(cdev->lldi.tids, stid,
					ss_family);
		cxgbit_np_hash_del(cdev, cnp);
		return ret;
	}
	return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret = -1;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_np_cdev(cnp);
	if (!cdev)
		goto out;

	if (cxgbit_np_hash_find(cdev, cnp) >= 0)
		goto out;

	if (__cxgbit_setup_cdev_np(cdev, cnp))
		goto out;

	cnp->com.cdev = cdev;
	ret = 0;
out:
	mutex_unlock(&cdev_list_lock);
	return ret;
}

static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;
	u32 count = 0;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
			mutex_unlock(&cdev_list_lock);
			return -1;
		}
	}

	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_setup_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
		if (ret != 0)
			continue;
		count++;
	}
	mutex_unlock(&cdev_list_lock);

	return count ? 0 : -1;
}
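/*
 * Entry point from the iSCSI target core for bringing up a network
 * portal: a wildcard address is programmed on every registered adapter,
 * a specific address only on the adapter that owns it.
 */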
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
	struct cxgbit_np *cnp;
	int ret;

	if ((ksockaddr->ss_family != AF_INET) &&
	    (ksockaddr->ss_family != AF_INET6))
		return -EINVAL;

	cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
	if (!cnp)
		return -ENOMEM;

	init_waitqueue_head(&cnp->accept_wait);
	init_completion(&cnp->com.wr_wait.completion);
	init_completion(&cnp->accept_comp);
	INIT_LIST_HEAD(&cnp->np_accept_list);
	spin_lock_init(&cnp->np_accept_lock);
	kref_init(&cnp->kref);

	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));
	memcpy(&cnp->com.local_addr, &np->np_sockaddr,
	       sizeof(cnp->com.local_addr));

	cnp->np = np;
	cnp->com.cdev = NULL;

	if (cxgbit_inaddr_any(cnp))
		ret = cxgbit_setup_all_np(cnp);
	else
		ret = cxgbit_setup_cdev_np(cnp);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return -EINVAL;
	}

	np->np_context = cnp;
	cnp->com.state = CSK_STATE_LISTEN;
	return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		     struct cxgbit_sock *csk)
{
	conn->login_family = np->np_sockaddr.ss_family;
	conn->login_sockaddr = csk->com.remote_addr;
	conn->local_sockaddr = csk->com.local_addr;
}

int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk;
	int ret = 0;

accept_wait:
	ret = wait_for_completion_interruptible(&cnp->accept_comp);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	spin_lock_bh(&cnp->np_accept_lock);
	if (list_empty(&cnp->np_accept_list)) {
		spin_unlock_bh(&cnp->np_accept_lock);
		goto accept_wait;
	}

	csk = list_first_entry(&cnp->np_accept_list,
			       struct cxgbit_sock,
			       accept_node);

	list_del_init(&csk->accept_node);
	spin_unlock_bh(&cnp->np_accept_lock);
	conn->context = csk;
	csk->conn = conn;

	cxgbit_set_conn_info(np, conn, csk);
	return 0;
}
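/*
 * Tear down the hardware listening server on one adapter: drop the
 * stid mapping, ask the hardware to remove the server and wait for the
 * reply, then release the CLIP entry (IPv6) and free the stid.
 */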
static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	bool ipv6 = false;

	stid = cxgbit_np_hash_del(cdev, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	if (cnp->np->np_sockaddr.ss_family == AF_INET6)
		ipv6 = true;

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);
	ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
				  cdev->lldi.rxq_ids[0], ipv6);

	if (ret > 0)
		ret = net_xmit_errno(ret);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return ret;
	}

	ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
				    0, 10, __func__);
	if (ret == -ETIMEDOUT)
		return ret;

	if (ipv6 && cnp->com.cdev) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
		cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr,
				   1);
	}

	cxgb4_free_stid(cdev->lldi.tids, stid,
			cnp->com.local_addr.ss_family);
	return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_free_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
	}
	mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	bool found = false;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cdev == cnp->com.cdev) {
			found = true;
			break;
		}
	}
	if (!found)
		goto out;

	__cxgbit_free_cdev_np(cdev, cnp);
out:
	mutex_unlock(&cdev_list_lock);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk);

void cxgbit_free_np(struct iscsi_np *np)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk, *tmp;

	cnp->com.state = CSK_STATE_DEAD;
	if (cnp->com.cdev)
		cxgbit_free_cdev_np(cnp);
	else
		cxgbit_free_all_np(cnp);

	spin_lock_bh(&cnp->np_accept_lock);
	list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
		list_del_init(&csk->accept_node);
		__cxgbit_free_conn(csk);
	}
	spin_unlock_bh(&cnp->np_accept_lock);

	np->np_context = NULL;
	cxgbit_put_cnp(cnp);
}

static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
			      NULL, NULL);

	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_debug("%s cxgbit_device %p\n", __func__, handle);
	kfree_skb(skb);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbit_device *cdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("%s cdev %p\n", __func__, cdev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgbit_ofld_send(cdev, skb);
}
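/*
 * Abort the connection: purge pending TX and issue an ABORT_REQ from
 * the pre-allocated skb queue so the abort cannot fail under memory
 * pressure. A FLOWC WR is sent first if none has gone out yet, since
 * the firmware expects it as the first WR on every offloaded flow.
 */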
static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_abort_req), 16);

	pr_debug("%s: csk %p tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	skb = __skb_dequeue(&csk->skbq);
	cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
			  csk->com.cdev, cxgbit_abort_arp_failure);

	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	__kfree_skb(skb);

	if (csk->com.state != CSK_STATE_ESTABLISHED)
		goto no_abort;

	set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
	csk->com.state = CSK_STATE_ABORTING;

	cxgbit_send_abort_req(csk);

	return;

no_abort:
	cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
	cxgbit_put_csk(csk);
}

void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
		__skb_queue_tail(&csk->backlogq, skb);
	} else {
		__cxgbit_abort_conn(csk, skb);
	}
	spin_unlock_bh(&csk->lock);

	cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
			      csk->tid, 600, __func__);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	bool release = false;

	pr_debug("%s: state %d\n",
		 __func__, csk->com.state);

	spin_lock_bh(&csk->lock);
	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
			csk->com.state = CSK_STATE_CLOSING;
			cxgbit_send_halfclose(csk);
		} else {
			csk->com.state = CSK_STATE_ABORTING;
			cxgbit_send_abort_req(csk);
		}
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_MORIBUND;
		cxgbit_send_halfclose(csk);
		break;
	case CSK_STATE_DEAD:
		release = true;
		break;
	default:
		pr_err("%s: csk %p; state %d\n",
		       __func__, csk, csk->com.state);
	}
	spin_unlock_bh(&csk->lock);

	if (release)
		cxgbit_put_csk(csk);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
	__cxgbit_free_conn(conn->context);
}
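/*
 * Derive the effective MSS from the negotiated MTU index, subtracting
 * the IP/TCP header overhead and, when negotiated, the TCP timestamp
 * option.
 */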
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
		    ((csk->com.remote_addr.ss_family == AF_INET) ?
		     sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		    sizeof(struct tcphdr);
	csk->mss = csk->emss;
	if (TCPOPT_TSTAMP_G(opt))
		csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (csk->emss < 128)
		csk->emss = 128;
	if (csk->emss & 7)
		pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			TCPOPT_MSS_G(opt), csk->mss, csk->emss);
	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
		 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	__skb_queue_purge(&csk->txq);
	__skb_queue_purge(&csk->rxq);
	__skb_queue_purge(&csk->backlogq);
	__skb_queue_purge(&csk->ppodq);
	__skb_queue_purge(&csk->skbq);

	while ((skb = cxgbit_sock_dequeue_wr(csk)))
		kfree_skb(skb);

	__kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
	struct cxgbit_sock *csk;
	struct cxgbit_device *cdev;

	csk = container_of(kref, struct cxgbit_sock, kref);

	pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

	if (csk->com.local_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &csk->com.local_addr;
		cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
				   (const u32 *)
				   &sin6->sin6_addr.s6_addr, 1);
	}

	cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
			 csk->com.local_addr.ss_family);
	dst_release(csk->dst);
	cxgb4_l2t_release(csk->l2t);

	cdev = csk->com.cdev;
	spin_lock_bh(&cdev->cskq.lock);
	list_del(&csk->list);
	spin_unlock_bh(&cdev->cskq.lock);

	cxgbit_free_skb(csk);
	cxgbit_put_cnp(csk->cnp);
	cxgbit_put_cdev(cdev);

	kfree(csk);
}

static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
	csk->rcv_win = CXGBIT_10G_RCV_WIN;
	if (scale)
		csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
	csk->snd_win = CXGBIT_10G_SND_WIN;
	if (scale)
		csk->snd_win *= scale;

	pr_debug("%s snd_win %d rcv_win %d\n",
		 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;

	return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
	int ret;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = local_port
	};

	ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
	if (ret)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
		ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
		ret = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

	return cxgbit_select_priority(ret);
}
#endif
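/*
 * Bind a new connection to its port: resolve the neighbour, grab an
 * L2T entry and choose TX/RX queue indices, spreading connections
 * round-robin across the port's queue sets. The loopback path differs
 * only in how the netdev is found and in using the per-channel rather
 * than per-port queue step.
 */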
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
		    u16 local_port, struct dst_entry *dst,
		    struct cxgbit_device *cdev)
{
	struct neighbour *n;
	int ret, step;
	struct net_device *ndev;
	u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	ret = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
		else
			ndev = NULL;

		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
					 n, ndev, 0);
		if (!csk->l2t)
			goto out;
		csk->mtu = ndev->mtu;
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq /
		       cdev->lldi.nchan;
		csk->txq_idx = cxgb4_port_idx(ndev) * step;
		step = cdev->lldi.nrxq /
		       cdev->lldi.nchan;
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		csk->rss_qid = cdev->lldi.rxq_ids[
				cxgb4_port_idx(ndev) * step];
		csk->port_id = cxgb4_port_idx(ndev);
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	} else {
		ndev = cxgbit_get_real_dev(n->dev);
		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

#ifdef CONFIG_CHELSIO_T4_DCB
		if (cxgbit_get_iscsi_dcb_state(ndev))
			priority = cxgbit_get_iscsi_dcb_priority(ndev,
								 local_port);

		csk->dcb_priority = priority;

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
		if (!csk->l2t)
			goto out;
		port_id = cxgb4_port_idx(ndev);
		csk->mtu = dst_mtu(dst);
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq /
		       cdev->lldi.nports;
		csk->txq_idx = (port_id * step) +
			       (cdev->selectq[port_id][0]++ % step);
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		step = cdev->lldi.nrxq /
		       cdev->lldi.nports;
		rxq_idx = (port_id * step) +
			  (cdev->selectq[port_id][1]++ % step);
		csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
		csk->port_id = port_id;
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	}
	ret = 0;
out:
	rcu_read_unlock();
	neigh_release(n);
	return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, tid, 0);
	cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
		struct l2t_entry *l2e)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	if (csk->com.state != CSK_STATE_ESTABLISHED) {
		__kfree_skb(skb);
		return;
	}

	cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.
 * Returns 0 on success, -1 if the skb allocation fails.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -1;

	credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
		      RX_CREDITS_V(csk->rx_credits);

	cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
			    credit_dack);

	csk->rx_credits = 0;

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock_bh(&csk->lock);
		return 0;
	}

	cxgbit_send_rx_credits(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

#define FLOWC_WR_NPARAMS_MIN	9
#define FLOWC_WR_NPARAMS_MAX	11
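/*
 * Pre-allocate the skbs the connection needs on paths that must not
 * fail: three WR-sized skbs (large enough for a FLOWC, an ABORT_REQ or
 * an ABORT_RPL) plus the LRO header skb.
 */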
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len, flowclen;
	u8 i;

	flowclen = offsetof(struct fw_flowc_wr,
			    mnemval[FLOWC_WR_NPARAMS_MAX]);

	len = max_t(u32, sizeof(struct cpl_abort_req),
		    sizeof(struct cpl_abort_rpl));

	len = max(len, flowclen);
	len = roundup(len, 16);

	for (i = 0; i < 3; i++) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (!skb)
			goto out;
		__skb_queue_tail(&csk->skbq, skb);
	}

	skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
	if (!skb)
		goto out;

	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
	csk->lro_hskb = skb;

	return 0;
out:
	__skb_queue_purge(&csk->skbq);
	return -ENOMEM;
}
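/*
 * Build and send CPL_PASS_ACCEPT_RPL, committing the TCP options (MSS
 * index, window scale, timestamps, SACK, ECN), the opt0/opt2 flags and
 * the RSS queue for the new offloaded connection.
 */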
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
	struct sk_buff *skb;
	const struct tcphdr *tcph;
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	unsigned int len = roundup(sizeof(*rpl5), 16);
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2, hlen;
	u32 wscale;
	u32 win;

	pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		cxgbit_put_csk(csk);
		return;
	}

	rpl5 = __skb_put_zero(skb, len);

	INIT_TP_WR(rpl5, csk->tid);
	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
		      req->tcpopt.tstamp,
		      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(csk->rcv_win);
	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = csk->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 =  TCAM_BYPASS_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(csk->l2t->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		DSCP_V(csk->tos >> 2) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(win);

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;

	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale)
		opt2 |= WND_SCALE_EN_F;

	hlen = ntohl(req->hdr_len);

	if (is_t5(lldi->adapter_type))
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
	else
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);

	opt2 |= RX_COALESCE_V(3);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

	opt2 |= T5_ISS_F;
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

	opt2 |= T5_OPT_2_VALID_F;

	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
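/*
 * Handle CPL_PASS_ACCEPT_REQ: an incoming SYN matched one of the
 * hardware listening servers. Validate the stid/tid, find a route back
 * to the peer, allocate and initialize the cxgbit_sock, insert it in
 * the TID table and answer with a PASS_ACCEPT_RPL.
 */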
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk = NULL;
	struct cxgbit_np *cnp;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	u16 peer_mss = ntohs(req->tcpopt.mss);
	unsigned short hdrs;
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int ret;
	int iptype;

	pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
		 __func__, cdev, stid, tid);

	cnp = lookup_stid(t, stid);
	if (!cnp) {
		pr_err("%s connect request on invalid stid %d\n",
		       __func__, stid);
		goto rel_skb;
	}

	if (cnp->com.state != CSK_STATE_LISTEN) {
		pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
		       __func__);
		goto reject;
	}

	csk = lookup_tid(t, tid);
	if (csk) {
		pr_err("%s csk not null tid %u\n",
		       __func__, tid);
		goto rel_skb;
	}

	cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
			peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
				      *(__be32 *)local_ip,
				      *(__be32 *)peer_ip,
				      local_port, peer_port,
				      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
	} else {
		pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
				       local_ip, peer_ip,
				       local_port, peer_port,
				       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				       ((struct sockaddr_in6 *)
					&cnp->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
	if (!csk) {
		dst_release(dst);
		goto rel_skb;
	}

	ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
				  dst, cdev);
	if (ret) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(csk);
		goto reject;
	}

	kref_init(&csk->kref);
	init_completion(&csk->com.wr_wait.completion);

	INIT_LIST_HEAD(&csk->accept_node);

	hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
		sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
	if (peer_mss && csk->mtu > (peer_mss + hdrs))
		csk->mtu = peer_mss + hdrs;

	csk->com.state = CSK_STATE_CONNECTING;
	csk->com.cdev = cdev;
	csk->cnp = cnp;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	csk->dst = dst;
	csk->tid = tid;
	csk->wr_cred = cdev->lldi.wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
					  &csk->com.local_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&csk->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &csk->com.local_addr;

		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
		cxgb4_clip_get(cdev->lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr,
			       1);

		sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	skb_queue_head_init(&csk->rxq);
	skb_queue_head_init(&csk->txq);
	skb_queue_head_init(&csk->ppodq);
	skb_queue_head_init(&csk->backlogq);
	skb_queue_head_init(&csk->skbq);
	cxgbit_sock_reset_wr_list(csk);
	spin_lock_init(&csk->lock);
	init_waitqueue_head(&csk->waitq);
	init_waitqueue_head(&csk->ack_waitq);
	csk->lock_owner = false;

	if (cxgbit_alloc_csk_skb(csk)) {
		dst_release(dst);
		kfree(csk);
		goto rel_skb;
	}

	cxgbit_get_cnp(cnp);
	cxgbit_get_cdev(cdev);

	spin_lock(&cdev->cskq.lock);
	list_add_tail(&csk->list, &cdev->cskq.list);
	spin_unlock(&cdev->cskq.lock);
	cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
	cxgbit_pass_accept_rpl(csk, req);
	goto rel_skb;

reject:
	cxgbit_release_tid(cdev, tid);
rel_skb:
	__kfree_skb(skb);
}
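/*
 * Size the FLOWC work request: the parameter count varies with window
 * scaling and DCB support, and the length is rounded up to 16-byte
 * units, which is also how TX credits are accounted.
 */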
static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
			   u32 *flowclenp)
{
	u32 nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;

	if (csk->snd_wscale)
		nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the flowc request.
	 * Pass back the nparams and actual flowc length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;
	return flowclen16;
}

u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct fw_flowc_wr *flowc;
	u32 nparams, flowclen16, flowclen;
	struct sk_buff *skb;
	u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

	flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

	skb = __skb_dequeue(&csk->skbq);
	flowc = __skb_put_zero(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (csk->com.cdev->lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(csk->emss);

	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
		flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
	else
		flowc->mnemval[8].val = cpu_to_be32(16384);

	index = 9;

	if (csk->snd_wscale) {
		flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
		flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
		index++;
	}

#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == VLAN_NONE) {
		pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
		flowc->mnemval[index].val = cpu_to_be32(0);
	} else
		flowc->mnemval[index].val = cpu_to_be32(
				(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

	pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
		 " rcv_seq = %u; snd_win = %u; emss = %u\n",
		 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
		 csk->rcv_nxt, csk->snd_win, csk->emss);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, skb);
	return flowclen16;
}
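/*
 * Enable iSCSI header/data digest offload by programming the ULP
 * submode bits of the connection's TCB through CPL_SET_TCB_FIELD, then
 * wait for the firmware acknowledgement.
 */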
int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
	u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode */
	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}

int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}
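/*
 * Server create/remove replies: both handlers simply wake up the waiter
 * that issued the request against this stid.
 */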
static void
cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		goto rel_skb;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		goto rel_skb;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	struct cxgbit_sock *csk;
	struct cxgbit_np *cnp;
	u16 tcp_opt = be16_to_cpu(req->tcp_opt);
	u32 snd_isn = be32_to_cpu(req->snd_isn);
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	cnp = csk->cnp;

	pr_debug("%s: csk %p; tid %u; cnp %p\n",
		 __func__, csk, tid, cnp);

	csk->write_seq = snd_isn;
	csk->snd_una = snd_isn;
	csk->snd_nxt = snd_isn;

	csk->rcv_nxt = rcv_isn;

	if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
		csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));

	csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
	cxgbit_set_emss(csk, tcp_opt);
	dst_confirm(csk->dst);
	csk->com.state = CSK_STATE_ESTABLISHED;
	spin_lock_bh(&cnp->np_accept_lock);
	list_add_tail(&csk->accept_node, &cnp->np_accept_list);
	spin_unlock_bh(&cnp->np_accept_lock);
	complete(&cnp->accept_comp);
rel_skb:
	__kfree_skb(skb);
}
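/*
 * Queue an skb for the connection's RX path and wake up whoever is
 * waiting on csk->waitq to process it.
 */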
  1360. static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
  1361. {
  1362. cxgbit_skcb_flags(skb) = 0;
  1363. spin_lock_bh(&csk->rxq.lock);
  1364. __skb_queue_tail(&csk->rxq, skb);
  1365. spin_unlock_bh(&csk->rxq.lock);
  1366. wake_up(&csk->waitq);
  1367. }
  1368. static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
  1369. {
  1370. pr_debug("%s: csk %p; tid %u; state %d\n",
  1371. __func__, csk, csk->tid, csk->com.state);
  1372. switch (csk->com.state) {
  1373. case CSK_STATE_ESTABLISHED:
  1374. csk->com.state = CSK_STATE_CLOSING;
  1375. cxgbit_queue_rx_skb(csk, skb);
  1376. return;
  1377. case CSK_STATE_CLOSING:
  1378. /* simultaneous close */
  1379. csk->com.state = CSK_STATE_MORIBUND;
  1380. break;
  1381. case CSK_STATE_MORIBUND:
  1382. csk->com.state = CSK_STATE_DEAD;
  1383. cxgbit_put_csk(csk);
  1384. break;
  1385. case CSK_STATE_ABORTING:
  1386. break;
  1387. default:
  1388. pr_info("%s: cpl_peer_close in bad state %d\n",
  1389. __func__, csk->com.state);
  1390. }
  1391. __kfree_skb(skb);
  1392. }
  1393. static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
  1394. {
  1395. pr_debug("%s: csk %p; tid %u; state %d\n",
  1396. __func__, csk, csk->tid, csk->com.state);
  1397. switch (csk->com.state) {
  1398. case CSK_STATE_CLOSING:
  1399. csk->com.state = CSK_STATE_MORIBUND;
  1400. break;
  1401. case CSK_STATE_MORIBUND:
  1402. csk->com.state = CSK_STATE_DEAD;
  1403. cxgbit_put_csk(csk);
  1404. break;
  1405. case CSK_STATE_ABORTING:
  1406. case CSK_STATE_DEAD:
  1407. break;
  1408. default:
  1409. pr_info("%s: cpl_close_con_rpl in bad state %d\n",
  1410. __func__, csk->com.state);
  1411. }
  1412. __kfree_skb(skb);
  1413. }
static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *hdr = cplhdr(skb);
	unsigned int tid = GET_TID(hdr);
	struct sk_buff *rpl_skb;
	bool release = false;
	bool wakeup_thread = false;
	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, tid, csk->com.state);

	if (cxgb_is_neg_adv(hdr->status)) {
		pr_err("%s: got neg advise %d on tid %u\n",
		       __func__, hdr->status, tid);
		goto rel_skb;
	}

	switch (csk->com.state) {
	case CSK_STATE_CONNECTING:
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		release = true;
		break;
	case CSK_STATE_ESTABLISHED:
		csk->com.state = CSK_STATE_DEAD;
		wakeup_thread = true;
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_DEAD;
		if (!csk->conn)
			release = true;
		break;
	case CSK_STATE_ABORTING:
		break;
	default:
		pr_info("%s: cpl_abort_req_rss in bad state %d\n",
			__func__, csk->com.state);
		csk->com.state = CSK_STATE_DEAD;
	}

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	rpl_skb = __skb_dequeue(&csk->skbq);

	cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, rpl_skb);

	if (wakeup_thread) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (release)
		cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}
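
/*
 * CPL_ABORT_RPL_RSS: hardware acknowledgement of an abort we
 * requested; wake any waiter with the status and drop the reference.
 */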
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_ABORTING:
		csk->com.state = CSK_STATE_DEAD;
		if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
			cxgbit_wake_up(&csk->com.wr_wait, __func__,
				       rpl->status);
		cxgbit_put_csk(csk);
		break;
	default:
		pr_info("%s: cpl_abort_rpl_rss in state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}
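
/*
 * Sanity-check tx credit accounting: the credits still held by the
 * pending work requests plus the free credits must add up to the
 * connection's maximum. Returns true on an accounting error.
 */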
static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
	const struct sk_buff *skb = csk->wr_pending_head;
	u32 credit = 0;

	if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u > %u\n",
		       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
		return true;
	}

	while (skb) {
		credit += (__force u32)skb->csum;
		skb = cxgbit_skcb_tx_wr_next(skb);
	}

	if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
		       csk, csk->tid, csk->wr_cred,
		       credit, csk->wr_max_cred);
		return true;
	}

	return false;
}
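
/*
 * CPL_FW4_ACK: firmware has acknowledged tx work requests. Return
 * the freed credits, retire completed work requests (each skb caches
 * its credit count in skb->csum), update snd_una when the sequence
 * number is valid, and push any queued tx frames.
 */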
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
	u32 credits = rpl->credits;
	u32 snd_una = ntohl(rpl->snd_una);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbit_sock_peek_wr(csk);
		u32 csum;

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
			       csk, csk->tid, credits,
			       csk->wr_cred, csk->wr_una_cred);
			break;
		}

		/* read the cached credit count only after the NULL check */
		csum = (__force u32)p->csum;
		if (unlikely(credits < csum)) {
			pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				csum);
			p->csum = (__force __wsum)(csum - credits);
			break;
		}

		cxgbit_sock_dequeue_wr(csk);
		credits -= csum;
		kfree_skb(p);
	}

	if (unlikely(cxgbit_credit_err(csk))) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u, snd_una %u/%u.",
				csk, csk->tid, snd_una,
				csk->snd_una);
			goto rel_skb;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
			wake_up(&csk->ack_waitq);
		}
	}

	if (skb_queue_len(&csk->txq))
		cxgbit_push_tx_frames(csk);

rel_skb:
	__kfree_skb(skb);
}
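
/*
 * CPL_SET_TCB_RPL: completion for a TCB update; wake the waiter with
 * the hardware status, then release the connection reference.
 */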
static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
	cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}
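
/*
 * CPL_RX_DATA: ingress payload; look up the connection by tid and
 * queue the skb for the rx thread.
 */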
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_rx_data *cpl = cplhdr(skb);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	cxgbit_queue_rx_skb(csk, skb);
	return;
rel_skb:
	__kfree_skb(skb);
}
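
/*
 * Run the per-CPL backlog handler stored in the skb control block.
 * If another context currently owns the connection lock, park the
 * skb on the backlog queue to be run once the lock is released.
 */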
static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	spin_lock(&csk->lock);
	if (csk->lock_owner) {
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock(&csk->lock);
		return;
	}

	cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
	spin_unlock(&csk->lock);
}

static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_get_csk(csk);
	__cxgbit_process_rx_cpl(csk, skb);
	cxgbit_put_csk(csk);
}
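
/*
 * Demultiplex connection-level CPL messages: select the backlog
 * handler for the opcode, look up the connection, and process the
 * message. CPL_FW4_ACK runs without taking an extra csk reference;
 * the other opcodes hold one across the handler.
 */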
static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_tx_data *cpl = cplhdr(skb);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;
	unsigned int tid = GET_TID(cpl);
	u8 opcode = cxgbit_skcb_rx_opcode(skb);
	bool ref = true;

	switch (opcode) {
	case CPL_FW4_ACK:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
		ref = false;
		break;
	case CPL_PEER_CLOSE:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	if (ref)
		cxgbit_process_rx_cpl(csk, skb);
	else
		__cxgbit_process_rx_cpl(csk, skb);

	return;
rel_skb:
	__kfree_skb(skb);
}
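
/*
 * Top-level CPL dispatch table, indexed by CPL opcode. Listen-side
 * messages go to their dedicated handlers; per-connection messages
 * funnel through cxgbit_rx_cpl.
 */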
cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL]	= cxgbit_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL]	= cxgbit_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]	= cxgbit_pass_accept_req,
	[CPL_PASS_ESTABLISH]	= cxgbit_pass_establish,
	[CPL_SET_TCB_RPL]	= cxgbit_set_tcb_rpl,
	[CPL_RX_DATA]		= cxgbit_rx_data,
	[CPL_FW4_ACK]		= cxgbit_rx_cpl,
	[CPL_PEER_CLOSE]	= cxgbit_rx_cpl,
	[CPL_CLOSE_CON_RPL]	= cxgbit_rx_cpl,
	[CPL_ABORT_REQ_RSS]	= cxgbit_rx_cpl,
	[CPL_ABORT_RPL_RSS]	= cxgbit_rx_cpl,
};