chtls_io.c

/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#include <crypto/aes.h>

#include "chtls.h"
#include "chtls_cm.h"
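
/* A non-negative txkey/rxkey index indicates that a TX (resp. RX) key has
 * been programmed on the adapter for this offloaded connection.
 */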
static bool is_tls_tx(struct chtls_sock *csk)
{
	return csk->tlshws.txkey >= 0;
}

static bool is_tls_rx(struct chtls_sock *csk)
{
	return csk->tlshws.rxkey >= 0;
}

static int data_sgl_len(const struct sk_buff *skb)
{
	unsigned int cnt;

	cnt = skb_shinfo(skb)->nr_frags;
	return sgl_len(cnt) * 8;
}

static int nos_ivs(struct sock *sk, unsigned int size)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	return DIV_ROUND_UP(size, csk->tlshws.mfs);
}

static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
{
	int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
	int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);

	if ((hlen + KEY_ON_MEM_SZ + ivs_size) <
	    MAX_IMM_OFLD_TX_DATA_WR_LEN) {
		ULP_SKB_CB(skb)->ulp.tls.iv = 1;
		return 1;
	}
	ULP_SKB_CB(skb)->ulp.tls.iv = 0;
	return 0;
}

static int max_ivs_size(struct sock *sk, int size)
{
	return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
}

static int ivs_size(struct sock *sk, const struct sk_buff *skb)
{
	return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
			CIPHER_BLOCK_SIZE) : 0;
}

static int flowc_wr_credits(int nparams, int *flowclenp)
{
	int flowclen16, flowclen;

	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;

	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}

static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
					   struct fw_flowc_wr *flowc,
					   int flowclen)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	skb = alloc_skb(flowclen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	memcpy(__skb_put(skb, flowclen), flowc, flowclen);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);

	return skb;
}

static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
			 int flowclen)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int flowclen16;
	int ret;

	flowclen16 = flowclen / 16;

	if (csk_flag(sk, CSK_TX_DATA_SENT)) {
		skb = create_flowc_wr_skb(sk, flowc, flowclen);
		if (!skb)
			return -ENOMEM;

		skb_entail(sk, skb,
			   ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
		return 0;
	}

	ret = cxgb4_immdata_send(csk->egress_dev,
				 csk->txq_idx,
				 flowc, flowclen);
	if (!ret)
		return flowclen16;

	skb = create_flowc_wr_skb(sk, flowc, flowclen);
	if (!skb)
		return -ENOMEM;

	send_or_defer(sk, tp, skb, 0);
	return flowclen16;
}

static u8 tcp_state_to_flowc_state(u8 state)
{
	switch (state) {
	case TCP_ESTABLISHED:
		return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
	case TCP_CLOSE_WAIT:
		return FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
	case TCP_FIN_WAIT1:
		return FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
	case TCP_CLOSING:
		return FW_FLOWC_MNEM_TCPSTATE_CLOSING;
	case TCP_LAST_ACK:
		return FW_FLOWC_MNEM_TCPSTATE_LASTACK;
	case TCP_FIN_WAIT2:
		return FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;
	}

	return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
}
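
/* Build a FlowC work request describing this connection (PF, channel, queues,
 * sequence numbers, send buffer, MSS, TCP state and ULP mode) and hand it to
 * send_flowc_wr().  Returns the number of 16-byte credits consumed, or a
 * negative errno on allocation failure.
 */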
int send_tx_flowc_wr(struct sock *sk, int compl,
		     u32 snd_nxt, u32 rcv_nxt)
{
	struct flowc_packed {
		struct fw_flowc_wr fc;
		struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
	} __packed sflowc;
	int nparams, paramidx, flowclen16, flowclen;
	struct fw_flowc_wr *flowc;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);
	memset(&sflowc, 0, sizeof(sflowc));
	flowc = &sflowc.fc;

#define FLOWC_PARAM(__m, __v) \
	do { \
		flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \
		flowc->mnemval[paramidx].val = cpu_to_be32(__v); \
		paramidx++; \
	} while (0)

	paramidx = 0;

	FLOWC_PARAM(PFNVFN, FW_PFVF_CMD_PFN_V(csk->cdev->lldi->pf));
	FLOWC_PARAM(CH, csk->tx_chan);
	FLOWC_PARAM(PORT, csk->tx_chan);
	FLOWC_PARAM(IQID, csk->rss_qid);
	FLOWC_PARAM(SNDNXT, tp->snd_nxt);
	FLOWC_PARAM(RCVNXT, tp->rcv_nxt);
	FLOWC_PARAM(SNDBUF, csk->sndbuf);
	FLOWC_PARAM(MSS, tp->mss_cache);
	FLOWC_PARAM(TCPSTATE, tcp_state_to_flowc_state(sk->sk_state));

	if (SND_WSCALE(tp))
		FLOWC_PARAM(RCV_SCALE, SND_WSCALE(tp));

	if (csk->ulp_mode == ULP_MODE_TLS)
		FLOWC_PARAM(ULD_MODE, ULP_MODE_TLS);

	if (csk->tlshws.fcplenmax)
		FLOWC_PARAM(TXDATAPLEN_MAX, csk->tlshws.fcplenmax);

	nparams = paramidx;
#undef FLOWC_PARAM

	flowclen16 = flowc_wr_credits(nparams, &flowclen);
	flowc->op_to_nparams =
		cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
			    FW_WR_COMPL_V(compl) |
			    FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));

	return send_flowc_wr(sk, flowc, flowclen);
}

/* Copy IVs to WR */
static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb)
{
	struct chtls_sock *csk;
	unsigned char *iv_loc;
	struct chtls_hws *hws;
	unsigned char *ivs;
	u16 number_of_ivs;
	struct page *page;
	int err = 0;

	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;
	number_of_ivs = nos_ivs(sk, skb->len);

	if (number_of_ivs > MAX_IVS_PAGE) {
		pr_warn("MAX IVs in PAGE exceeded %d\n", number_of_ivs);
		return -ENOMEM;
	}

	/* generate the IVs */
	ivs = kmalloc(number_of_ivs * CIPHER_BLOCK_SIZE, GFP_ATOMIC);
	if (!ivs)
		return -ENOMEM;
	get_random_bytes(ivs, number_of_ivs * CIPHER_BLOCK_SIZE);

	if (skb_ulp_tls_iv_imm(skb)) {
		/* send the IVs as immediate data in the WR */
		iv_loc = (unsigned char *)__skb_push(skb, number_of_ivs *
						     CIPHER_BLOCK_SIZE);
		if (iv_loc)
			memcpy(iv_loc, ivs, number_of_ivs * CIPHER_BLOCK_SIZE);

		hws->ivsize = number_of_ivs * CIPHER_BLOCK_SIZE;
	} else {
		/* Send the IVs as sgls */
		/* Already accounted IV DSGL for credits */
		skb_shinfo(skb)->nr_frags--;
		page = alloc_pages(sk->sk_allocation | __GFP_COMP, 0);
		if (!page) {
			pr_info("%s : Page allocation for IVs failed\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}
		memcpy(page_address(page), ivs, number_of_ivs *
		       CIPHER_BLOCK_SIZE);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0,
				   number_of_ivs * CIPHER_BLOCK_SIZE);
		hws->ivsize = 0;
	}
out:
	kfree(ivs);
	return err;
}

/* Copy Key to WR */
static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb)
{
	struct ulptx_sc_memrd *sc_memrd;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct ulptx_idata *sc;
	struct chtls_hws *hws;
	u32 immdlen;
	int kaddr;

	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;
	cdev = csk->cdev;

	immdlen = sizeof(*sc) + sizeof(*sc_memrd);
	kaddr = keyid_to_addr(cdev->kmap.start, hws->txkey);
	sc = (struct ulptx_idata *)__skb_push(skb, immdlen);
	if (sc) {
		sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
		sc->len = htonl(0);
		sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
		sc_memrd->cmd_to_len =
				htonl(ULPTX_CMD_V(ULP_TX_SC_MEMRD) |
				      ULP_TX_SC_MORE_V(1) |
				      ULPTX_LEN16_V(hws->keylen >> 4));
		sc_memrd->addr = htonl(kaddr);
	}
}

static u64 tlstx_incr_seqnum(struct chtls_hws *hws)
{
	return hws->tx_seq_no++;
}

static bool is_sg_request(const struct sk_buff *skb)
{
	return skb->peeked ||
		(skb->len > MAX_IMM_ULPTX_WR_LEN);
}

/*
 * Returns true if an sk_buff carries urgent data.
 */
static bool skb_urgent(struct sk_buff *skb)
{
	return ULP_SKB_CB(skb)->flags & ULPCB_FLAG_URG;
}

/* TLS content type for CPL SFO */
static unsigned char tls_content_type(unsigned char content_type)
{
	switch (content_type) {
	case TLS_HDR_TYPE_CCS:
		return CPL_TX_TLS_SFO_TYPE_CCS;
	case TLS_HDR_TYPE_ALERT:
		return CPL_TX_TLS_SFO_TYPE_ALERT;
	case TLS_HDR_TYPE_HANDSHAKE:
		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
	case TLS_HDR_TYPE_HEARTBEAT:
		return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
	}
	return CPL_TX_TLS_SFO_TYPE_DATA;
}
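
/* Prepend the FW_TLSTX_DATA_WR work request and the CPL_TX_TLS_SFO CPL to the
 * skb and fill in lengths, credits, IV/key placement and the TLS record type
 * so the hardware can frame and encrypt the payload.
 */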
static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
			   int dlen, int tls_immd, u32 credits,
			   int expn, int pdus)
{
	struct fw_tlstx_data_wr *req_wr;
	struct cpl_tx_tls_sfo *req_cpl;
	unsigned int wr_ulp_mode_force;
	struct tls_scmd *updated_scmd;
	unsigned char data_type;
	struct chtls_sock *csk;
	struct net_device *dev;
	struct chtls_hws *hws;
	struct tls_scmd *scmd;
	struct adapter *adap;
	unsigned char *req;
	int immd_len;
	int iv_imm;
	int len;

	csk = rcu_dereference_sk_user_data(sk);
	iv_imm = skb_ulp_tls_iv_imm(skb);
	dev = csk->egress_dev;
	adap = netdev2adap(dev);
	hws = &csk->tlshws;
	scmd = &hws->scmd;
	len = dlen + expn;

	dlen = (dlen < hws->mfs) ? dlen : hws->mfs;
	atomic_inc(&adap->chcr_stats.tls_pdu_tx);

	updated_scmd = scmd;
	updated_scmd->seqno_numivs &= 0xffffff80;
	updated_scmd->seqno_numivs |= SCMD_NUM_IVS_V(pdus);
	hws->scmd = *updated_scmd;

	req = (unsigned char *)__skb_push(skb, sizeof(struct cpl_tx_tls_sfo));
	req_cpl = (struct cpl_tx_tls_sfo *)req;
	req = (unsigned char *)__skb_push(skb, (sizeof(struct
						fw_tlstx_data_wr)));

	req_wr = (struct fw_tlstx_data_wr *)req;
	immd_len = (tls_immd ? dlen : 0);
	req_wr->op_to_immdlen =
		htonl(FW_WR_OP_V(FW_TLSTX_DATA_WR) |
		      FW_TLSTX_DATA_WR_COMPL_V(1) |
		      FW_TLSTX_DATA_WR_IMMDLEN_V(immd_len));
	req_wr->flowid_len16 = htonl(FW_TLSTX_DATA_WR_FLOWID_V(csk->tid) |
				     FW_TLSTX_DATA_WR_LEN16_V(credits));
	wr_ulp_mode_force = TX_ULP_MODE_V(ULP_MODE_TLS);

	if (is_sg_request(skb))
		wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
			((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
			 FW_OFLD_TX_DATA_WR_SHOVE_F);

	req_wr->lsodisable_to_flags =
		htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
		      FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
		      T6_TX_FORCE_F | wr_ulp_mode_force |
		      TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
				 skb_queue_empty(&csk->txq)));

	req_wr->ctxloc_to_exp =
		htonl(FW_TLSTX_DATA_WR_NUMIVS_V(pdus) |
		      FW_TLSTX_DATA_WR_EXP_V(expn) |
		      FW_TLSTX_DATA_WR_CTXLOC_V(CHTLS_KEY_CONTEXT_DDR) |
		      FW_TLSTX_DATA_WR_IVDSGL_V(!iv_imm) |
		      FW_TLSTX_DATA_WR_KEYSIZE_V(hws->keylen >> 4));

	/* Fill in the length */
	req_wr->plen = htonl(len);
	req_wr->mfs = htons(hws->mfs);
	req_wr->adjustedplen_pkd =
		htons(FW_TLSTX_DATA_WR_ADJUSTEDPLEN_V(hws->adjustlen));
	req_wr->expinplenmax_pkd =
		htons(FW_TLSTX_DATA_WR_EXPINPLENMAX_V(hws->expansion));
	req_wr->pdusinplenmax_pkd =
		FW_TLSTX_DATA_WR_PDUSINPLENMAX_V(hws->pdus);
	req_wr->r10 = 0;

	data_type = tls_content_type(ULP_SKB_CB(skb)->ulp.tls.type);
	req_cpl->op_to_seg_len = htonl(CPL_TX_TLS_SFO_OPCODE_V(CPL_TX_TLS_SFO) |
				       CPL_TX_TLS_SFO_DATA_TYPE_V(data_type) |
				       CPL_TX_TLS_SFO_CPL_LEN_V(2) |
				       CPL_TX_TLS_SFO_SEG_LEN_V(dlen));
	req_cpl->pld_len = htonl(len - expn);

	req_cpl->type_protover = htonl(CPL_TX_TLS_SFO_TYPE_V
		((data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT) ?
		TLS_HDR_TYPE_HEARTBEAT : 0) |
		CPL_TX_TLS_SFO_PROTOVER_V(0));

	/* create the s-command */
	req_cpl->r1_lo = 0;
	req_cpl->seqno_numivs  = cpu_to_be32(hws->scmd.seqno_numivs);
	req_cpl->ivgen_hdrlen = cpu_to_be32(hws->scmd.ivgen_hdrlen);
	req_cpl->scmd1 = cpu_to_be64(tlstx_incr_seqnum(hws));
}

/*
 * Calculate the TLS data expansion size
 */
static int chtls_expansion_size(struct sock *sk, int data_len,
				int fullpdu,
				unsigned short *pducnt)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *hws = &csk->tlshws;
	struct tls_scmd *scmd = &hws->scmd;
	int fragsize = hws->mfs;
	int expnsize = 0;
	int fragleft;
	int fragcnt;
	int expppdu;

	if (SCMD_CIPH_MODE_G(scmd->seqno_numivs) ==
	    SCMD_CIPH_MODE_AES_GCM) {
		expppdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
			  TLS_HEADER_LENGTH;

		if (fullpdu) {
			*pducnt = data_len / (expppdu + fragsize);

			if (*pducnt > 32)
				*pducnt = 32;
			else if (!*pducnt)
				*pducnt = 1;
			expnsize = (*pducnt) * expppdu;
			return expnsize;
		}
		fragcnt = (data_len / fragsize);
		expnsize = fragcnt * expppdu;
		fragleft = data_len % fragsize;
		if (fragleft > 0)
			expnsize += expppdu;
	}
	return expnsize;
}

/* WR with IV, KEY and CPL SFO added */
static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb,
			       int tls_tx_imm, int tls_len, u32 credits)
{
	unsigned short pdus_per_ulp = 0;
	struct chtls_sock *csk;
	struct chtls_hws *hws;
	int expn_sz;
	int pdus;

	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;
	pdus = DIV_ROUND_UP(tls_len, hws->mfs);
	expn_sz = chtls_expansion_size(sk, tls_len, 0, NULL);
	if (!hws->compute) {
		hws->expansion = chtls_expansion_size(sk,
						      hws->fcplenmax,
						      1, &pdus_per_ulp);
		hws->pdus = pdus_per_ulp;
		hws->adjustlen = hws->pdus *
			((hws->expansion / hws->pdus) + hws->mfs);
		hws->compute = 1;
	}
	if (tls_copy_ivs(sk, skb))
		return;
	tls_copy_tx_key(sk, skb);
	tls_tx_data_wr(sk, skb, tls_len, tls_tx_imm, credits, expn_sz, pdus);
	hws->tx_seq_no += (pdus - 1);
}

static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
			    unsigned int immdlen, int len,
			    u32 credits, u32 compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int wr_ulp_mode_force;
	struct chtls_sock *csk;
	unsigned int opcode;

	csk = rcu_dereference_sk_user_data(sk);
	opcode = FW_OFLD_TX_DATA_WR;

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
	req->op_to_immdlen = htonl(WR_OP_V(opcode) |
				   FW_WR_COMPL_V(compl) |
				   FW_WR_IMMDLEN_V(immdlen));
	req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
				  FW_WR_LEN16_V(credits));

	wr_ulp_mode_force = TX_ULP_MODE_V(csk->ulp_mode);
	if (is_sg_request(skb))
		wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
			((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
			 FW_OFLD_TX_DATA_WR_SHOVE_F);

	req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
			FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
			FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
					(sk, CSK_TX_MORE_DATA)) &&
					skb_queue_empty(&csk->txq)));
	req->plen = htonl(len);
}

static int chtls_wr_size(struct chtls_sock *csk, const struct sk_buff *skb,
			 bool size)
{
	int wr_size;

	wr_size = TLS_WR_CPL_LEN;
	wr_size += KEY_ON_MEM_SZ;
	wr_size += ivs_size(csk->sk, skb);

	if (size)
		return wr_size;

	/* frags counted for IV dsgl */
	if (!skb_ulp_tls_iv_imm(skb))
		skb_shinfo(skb)->nr_frags++;

	return wr_size;
}

static bool is_ofld_imm(struct chtls_sock *csk, const struct sk_buff *skb)
{
	int length = skb->len;

	if (skb->peeked || skb->len > MAX_IMM_ULPTX_WR_LEN)
		return false;

	if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
		/* Check TLS header len for Immediate */
		if (csk->ulp_mode == ULP_MODE_TLS &&
		    skb_ulp_tls_inline(skb))
			length += chtls_wr_size(csk, skb, true);
		else
			length += sizeof(struct fw_ofld_tx_data_wr);

		return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
	}
	return true;
}

static unsigned int calc_tx_flits(const struct sk_buff *skb,
				  unsigned int immdlen)
{
	unsigned int flits, cnt;

	flits = immdlen / 8;            /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
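
/* Drain the connection's TX queue towards the adapter: for each queued skb,
 * account work-request credits, prepend the appropriate TX data WR (plain or
 * TLS) when a header is needed, and hand the skb to the L2T transmit path.
 * Returns the total truesize of the skbs that were sent.
 */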
int chtls_push_frames(struct chtls_sock *csk, int comp)
{
	struct chtls_hws *hws = &csk->tlshws;
	struct tcp_sock *tp;
	struct sk_buff *skb;
	int total_size = 0;
	struct sock *sk;
	int wr_size;

	wr_size = sizeof(struct fw_ofld_tx_data_wr);
	sk = csk->sk;
	tp = tcp_sk(sk);

	if (unlikely(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE)))
		return 0;

	if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
		return 0;

	while (csk->wr_credits && (skb = skb_peek(&csk->txq)) &&
	       (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_HOLD) ||
		skb_queue_len(&csk->txq) > 1)) {
		unsigned int credit_len = skb->len;
		unsigned int credits_needed;
		unsigned int completion = 0;
		int tls_len = skb->len;	/* TLS data len before IV/key */
		unsigned int immdlen;
		int len = skb->len;	/* length [ulp bytes] inserted by hw */
		int flowclen16 = 0;
		int tls_tx_imm = 0;

		immdlen = skb->len;
		if (!is_ofld_imm(csk, skb)) {
			immdlen = skb_transport_offset(skb);
			if (skb_ulp_tls_inline(skb))
				wr_size = chtls_wr_size(csk, skb, false);
			credit_len = 8 * calc_tx_flits(skb, immdlen);
		} else {
			if (skb_ulp_tls_inline(skb)) {
				wr_size = chtls_wr_size(csk, skb, false);
				tls_tx_imm = 1;
			}
		}
		if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR))
			credit_len += wr_size;
		credits_needed = DIV_ROUND_UP(credit_len, 16);
		if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(sk, 1, tp->snd_nxt,
						      tp->rcv_nxt);
			if (flowclen16 <= 0)
				break;
			csk->wr_credits -= flowclen16;
			csk->wr_unacked += flowclen16;
			csk->wr_nondata += flowclen16;
			csk_set_flag(csk, CSK_TX_DATA_SENT);
		}

		if (csk->wr_credits < credits_needed) {
			if (skb_ulp_tls_inline(skb) &&
			    !skb_ulp_tls_iv_imm(skb))
				skb_shinfo(skb)->nr_frags--;
			break;
		}

		__skb_unlink(skb, &csk->txq);
		skb_set_queue_mapping(skb, (csk->txq_idx << 1) |
				      CPL_PRIORITY_DATA);
		if (hws->ofld)
			hws->txqid = (skb->queue_mapping >> 1);
		skb->csum = (__force __wsum)(credits_needed + csk->wr_nondata);
		csk->wr_credits -= credits_needed;
		csk->wr_unacked += credits_needed;
		csk->wr_nondata = 0;
		enqueue_wr(csk, skb);

		if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
			if ((comp && csk->wr_unacked == credits_needed) ||
			    (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) ||
			    csk->wr_unacked >= csk->wr_max_credits / 2) {
				completion = 1;
				csk->wr_unacked = 0;
			}
			if (skb_ulp_tls_inline(skb))
				make_tlstx_data_wr(sk, skb, tls_tx_imm,
						   tls_len, credits_needed);
			else
				make_tx_data_wr(sk, skb, immdlen, len,
						credits_needed, completion);
			tp->snd_nxt += len;
			tp->lsndtime = tcp_time_stamp(tp);
			if (completion)
				ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
		} else {
			struct cpl_close_con_req *req = cplhdr(skb);
			unsigned int cmd  = CPL_OPCODE_G(ntohl
					     (OPCODE_TID(req)));

			if (cmd == CPL_CLOSE_CON_REQ)
				csk_set_flag(csk,
					     CSK_CLOSE_CON_REQUESTED);

			if ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) &&
			    (csk->wr_unacked >= csk->wr_max_credits / 2)) {
				req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
				csk->wr_unacked = 0;
			}
		}
		total_size += skb->truesize;
		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_BARRIER)
			csk_set_flag(csk, CSK_TX_WAIT_IDLE);
		t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
		cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
	}
	sk->sk_wmem_queued -= total_size;
	return total_size;
}

static void mark_urg(struct tcp_sock *tp, int flags,
		     struct sk_buff *skb)
{
	if (unlikely(flags & MSG_OOB)) {
		tp->snd_up = tp->write_seq;
		ULP_SKB_CB(skb)->flags = ULPCB_FLAG_URG |
					 ULPCB_FLAG_BARRIER |
					 ULPCB_FLAG_NO_APPEND |
					 ULPCB_FLAG_NEED_HDR;
	}
}

/*
 * Returns true if a connection should send more data to TCP engine
 */
static bool should_push(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	struct tcp_sock *tp = tcp_sk(sk);

	/*
	 * If we've released our offload resources there's nothing to do ...
	 */
	if (!cdev)
		return false;

	/*
	 * If there aren't any work requests in flight, or there isn't enough
	 * data in flight, or Nagle is off then send the current TX_DATA
	 * otherwise hold it and wait to accumulate more data.
	 */
	return csk->wr_credits == csk->wr_max_credits ||
		(tp->nonagle & TCP_NAGLE_OFF);
}

/*
 * Returns true if a TCP socket is corked.
 */
static bool corked(const struct tcp_sock *tp, int flags)
{
	return (flags & MSG_MORE) || (tp->nonagle & TCP_NAGLE_CORK);
}

/*
 * Returns true if a send should try to push new data.
 */
static bool send_should_push(struct sock *sk, int flags)
{
	return should_push(sk) && !corked(tcp_sk(sk), flags);
}

void chtls_tcp_push(struct sock *sk, int flags)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	int qlen = skb_queue_len(&csk->txq);

	if (likely(qlen)) {
		struct sk_buff *skb = skb_peek_tail(&csk->txq);
		struct tcp_sock *tp = tcp_sk(sk);

		mark_urg(tp, flags, skb);

		if (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) &&
		    corked(tp, flags)) {
			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_HOLD;
			return;
		}

		ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_HOLD;
		if (qlen == 1 &&
		    ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
		     should_push(sk)))
			chtls_push_frames(csk, 1);
	}
}

/*
 * Calculate the size for a new send sk_buff.  It's maximum size so we can
 * pack lots of data into it, unless we plan to send it immediately, in which
 * case we size it more tightly.
 *
 * Note: we don't bother compensating for MSS < PAGE_SIZE because it doesn't
 * arise in normal cases and when it does we are just wasting memory.
 */
static int select_size(struct sock *sk, int io_len, int flags, int len)
{
	const int pgbreak = SKB_MAX_HEAD(len);

	/*
	 * If the data wouldn't fit in the main body anyway, put only the
	 * header in the main body so it can use immediate data and place all
	 * the payload in page fragments.
	 */
	if (io_len > pgbreak)
		return 0;

	/*
	 * If we will be accumulating payload get a large main body.
	 */
	if (!send_should_push(sk, flags))
		return pgbreak;

	return io_len;
}

void skb_entail(struct sock *sk, struct sk_buff *skb, int flags)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ULP_SKB_CB(skb)->seq = tp->write_seq;
	ULP_SKB_CB(skb)->flags = flags;
	__skb_queue_tail(&csk->txq, skb);
	sk->sk_wmem_queued += skb->truesize;

	if (TCP_PAGE(sk) && TCP_OFF(sk)) {
		put_page(TCP_PAGE(sk));
		TCP_PAGE(sk) = NULL;
		TCP_OFF(sk) = 0;
	}
}

static struct sk_buff *get_tx_skb(struct sock *sk, int size)
{
	struct sk_buff *skb;

	skb = alloc_skb(size + TX_HEADER_LEN, sk->sk_allocation);
	if (likely(skb)) {
		skb_reserve(skb, TX_HEADER_LEN);
		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
		skb_reset_transport_header(skb);
	}
	return skb;
}

static struct sk_buff *get_record_skb(struct sock *sk, int size, bool zcopy)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	skb = alloc_skb(((zcopy ? 0 : size) + TX_TLSHDR_LEN +
			KEY_ON_MEM_SZ + max_ivs_size(sk, size)),
			sk->sk_allocation);
	if (likely(skb)) {
		skb_reserve(skb, (TX_TLSHDR_LEN +
			    KEY_ON_MEM_SZ + max_ivs_size(sk, size)));
		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
		skb_reset_transport_header(skb);
		ULP_SKB_CB(skb)->ulp.tls.ofld = 1;
		ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type;
	}
	return skb;
}

static void tx_skb_finalize(struct sk_buff *skb)
{
	struct ulp_skb_cb *cb = ULP_SKB_CB(skb);

	if (!(cb->flags & ULPCB_FLAG_NO_HDR))
		cb->flags = ULPCB_FLAG_NEED_HDR;
	cb->flags |= ULPCB_FLAG_NO_APPEND;
}

static void push_frames_if_head(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (skb_queue_len(&csk->txq) == 1)
		chtls_push_frames(csk, 1);
}

static int chtls_skb_copy_to_page_nocache(struct sock *sk,
					  struct iov_iter *from,
					  struct sk_buff *skb,
					  struct page *page,
					  int off, int copy)
{
	int err;

	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) +
				       off, copy, skb->len);
	if (err)
		return err;

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk->sk_wmem_queued += copy;
	return 0;
}

/* Read TLS header to find content type and data length */
static u16 tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
{
	if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr))
		return -EFAULT;
	return (__force u16)cpu_to_be16(thdr->length);
}
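
/* sendmsg() entry point for offloaded connections.  Plain payload is packed
 * into TX skbs up to the MSS; for TLS TX each record header supplied by
 * userspace is parsed first so the record can be handed to the hardware as a
 * unit.
 */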
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int mss, flags, err;
	int recordsz = 0;
	int copied = 0;
	int hdrlen = 0;
	long timeo;

	lock_sock(sk);
	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out_err;
	}

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	mss = csk->mss;
	csk_set_flag(csk, CSK_TX_MORE_DATA);

	while (msg_data_left(msg)) {
		int copy = 0;

		skb = skb_peek_tail(&csk->txq);
		if (skb) {
			copy = mss - skb->len;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		if (is_tls_tx(csk) && !csk->tlshws.txleft) {
			struct tls_hdr hdr;

			recordsz = tls_header_read(&hdr, &msg->msg_iter);
			size -= TLS_HEADER_LENGTH;
			hdrlen += TLS_HEADER_LENGTH;
			csk->tlshws.txleft = recordsz;
			csk->tlshws.type = hdr.type;
			if (skb)
				ULP_SKB_CB(skb)->ulp.tls.type = hdr.type;
		}

		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
		    copy <= 0) {
new_buf:
			if (skb) {
				tx_skb_finalize(skb);
				push_frames_if_head(sk);
			}

			if (is_tls_tx(csk)) {
				skb = get_record_skb(sk,
						     select_size(sk,
								 recordsz,
								 flags,
								 TX_TLSHDR_LEN),
						     false);
			} else {
				skb = get_tx_skb(sk,
						 select_size(sk, size, flags,
							     TX_HEADER_LEN));
			}
			if (unlikely(!skb))
				goto wait_for_memory;

			skb->ip_summed = CHECKSUM_UNNECESSARY;
			copy = mss;
		}
		if (copy > size)
			copy = size;

		if (skb_tailroom(skb) > 0) {
			copy = min(copy, skb_tailroom(skb));
			if (is_tls_tx(csk))
				copy = min_t(int, copy, csk->tlshws.txleft);
			err = skb_add_data_nocache(sk, skb,
						   &msg->msg_iter, copy);
			if (err)
				goto do_fault;
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			struct page *page = TCP_PAGE(sk);
			int pg_size = PAGE_SIZE;
			int off = TCP_OFF(sk);
			bool merge;

			if (page)
				pg_size <<= compound_order(page);

			if (off < pg_size &&
			    skb_can_coalesce(skb, i, page, off)) {
				merge = 1;
				goto copy;
			}
			merge = 0;
			if (i == (is_tls_tx(csk) ? (MAX_SKB_FRAGS - 1) :
			    MAX_SKB_FRAGS))
				goto new_buf;

			if (page && off == pg_size) {
				put_page(page);
				TCP_PAGE(sk) = page = NULL;
				pg_size = PAGE_SIZE;
			}

			if (!page) {
				gfp_t gfp = sk->sk_allocation;
				int order = cdev->send_page_order;

				if (order) {
					page = alloc_pages(gfp | __GFP_COMP |
							   __GFP_NOWARN |
							   __GFP_NORETRY,
							   order);
					if (page)
						pg_size <<=
							compound_order(page);
				}
				if (!page) {
					page = alloc_page(gfp);
					pg_size = PAGE_SIZE;
				}
				if (!page)
					goto wait_for_memory;
				off = 0;
			}
copy:
			if (copy > pg_size - off)
				copy = pg_size - off;
			if (is_tls_tx(csk))
				copy = min_t(int, copy, csk->tlshws.txleft);

			err = chtls_skb_copy_to_page_nocache(sk, &msg->msg_iter,
							     skb, page,
							     off, copy);
			if (unlikely(err)) {
				if (!TCP_PAGE(sk)) {
					TCP_PAGE(sk) = page;
					TCP_OFF(sk) = 0;
				}
				goto do_fault;
			}
			/* Update the skb. */
			if (merge) {
				skb_shinfo(skb)->frags[i - 1].size += copy;
			} else {
				skb_fill_page_desc(skb, i, page, off, copy);
				if (off + copy < pg_size) {
					/* space left keep page */
					get_page(page);
					TCP_PAGE(sk) = page;
				} else {
					TCP_PAGE(sk) = NULL;
				}
			}
			TCP_OFF(sk) = off + copy;
		}
		if (unlikely(skb->len == mss))
			tx_skb_finalize(skb);
		tp->write_seq += copy;
		copied += copy;
		size -= copy;

		if (is_tls_tx(csk))
			csk->tlshws.txleft -= copy;

		if (corked(tp, flags) &&
		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;

		if (size == 0)
			goto out;

		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)
			push_frames_if_head(sk);
		continue;
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto do_error;
	}
out:
	csk_reset_flag(csk, CSK_TX_MORE_DATA);
	if (copied)
		chtls_tcp_push(sk, flags);
done:
	release_sock(sk);
	return copied + hdrlen;
do_fault:
	if (!skb->len) {
		__skb_unlink(skb, &csk->txq);
		sk->sk_wmem_queued -= skb->truesize;
		__kfree_skb(skb);
	}
do_error:
	if (copied)
		goto out;
out_err:
	if (csk_conn_inline(csk))
		csk_reset_flag(csk, CSK_TX_MORE_DATA);
	copied = sk_stream_error(sk, flags, err);
	goto done;
}
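
/* sendpage() entry point: append page fragments to the tail skb on the TX
 * queue (coalescing where possible) and push completed skbs to the adapter.
 */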
int chtls_sendpage(struct sock *sk, struct page *page,
		   int offset, size_t size, int flags)
{
	struct chtls_sock *csk;
	int mss, err, copied;
	struct tcp_sock *tp;
	long timeo;

	tp = tcp_sk(sk);
	copied = 0;
	csk = rcu_dereference_sk_user_data(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	err = sk_stream_wait_connect(sk, &timeo);
	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
	    err != 0)
		goto out_err;

	mss = csk->mss;
	csk_set_flag(csk, CSK_TX_MORE_DATA);

	while (size > 0) {
		struct sk_buff *skb = skb_peek_tail(&csk->txq);
		int copy, i;

		/* only dereference skb->len once we know skb is non-NULL */
		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
		    (copy = mss - skb->len) <= 0) {
new_buf:
			if (is_tls_tx(csk)) {
				skb = get_record_skb(sk,
						     select_size(sk, size,
								 flags,
								 TX_TLSHDR_LEN),
						     true);
			} else {
				skb = get_tx_skb(sk, 0);
			}
			if (!skb)
				goto do_error;
			copy = mss;
		}
		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		} else {
			tx_skb_finalize(skb);
			push_frames_if_head(sk);
			goto new_buf;
		}

		skb->len += copy;
		if (skb->len == mss)
			tx_skb_finalize(skb);
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		tp->write_seq += copy;
		copied += copy;
		offset += copy;
		size -= copy;

		if (corked(tp, flags) &&
		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;

		if (!size)
			break;

		if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
			push_frames_if_head(sk);
		continue;

		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	}
out:
	csk_reset_flag(csk, CSK_TX_MORE_DATA);
	if (copied)
		chtls_tcp_push(sk, flags);
done:
	release_sock(sk);
	return copied;
do_error:
	if (copied)
		goto out;

out_err:
	if (csk_conn_inline(csk))
		csk_reset_flag(csk, CSK_TX_MORE_DATA);
	copied = sk_stream_error(sk, flags, err);
	goto done;
}
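
/* Recompute the receive window to advertise from the current socket receive
 * buffer, clamped to [MIN_RCV_WND, MAX_RCV_WND], and grow tp->rcv_wnd when
 * the application has enlarged its receive buffer.
 */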
static void chtls_select_window(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int wnd = tp->rcv_wnd;

	wnd = max_t(unsigned int, wnd, tcp_full_space(sk));
	wnd = max_t(unsigned int, MIN_RCV_WND, wnd);

	if (wnd > MAX_RCV_WND)
		wnd = MAX_RCV_WND;

	/*
	 * Check if we need to grow the receive window in response to an
	 * increase in the socket's receive buffer size.  Some applications
	 * increase the buffer size dynamically and rely on the window to
	 * grow accordingly.
	 */
	if (wnd > tp->rcv_wnd) {
		tp->rcv_wup -= wnd - tp->rcv_wnd;
		tp->rcv_wnd = wnd;
		/* Mark the receive window as updated */
		csk_reset_flag(csk, CSK_UPDATE_RCV_WND);
	}
}

/*
 * Send RX credits through an RX_DATA_ACK CPL message.  We are permitted
 * to return without sending the message in case we cannot allocate
 * an sk_buff.  Returns the number of credits sent.
 */
static u32 send_rx_credits(struct chtls_sock *csk, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return 0;
	__skb_put(skb, sizeof(*req));
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) |
				       RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

#define CREDIT_RETURN_STATE (TCPF_ESTABLISHED | \
			     TCPF_FIN_WAIT1 | \
			     TCPF_FIN_WAIT2)

/*
 * Called after some received data has been read.  It returns RX credits
 * to the HW for the amount of data processed.
 */
static void chtls_cleanup_rbuf(struct sock *sk, int copied)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp;
	int must_send;
	u32 credits;
	u32 thres;

	thres = 15 * 1024;

	if (!sk_in_state(sk, CREDIT_RETURN_STATE))
		return;

	chtls_select_window(sk);
	tp = tcp_sk(sk);
	credits = tp->copied_seq - tp->rcv_wup;
	if (unlikely(!credits))
		return;

	/*
	 * For coalescing to work effectively ensure the receive window has
	 * at least 16KB left.
	 */
	must_send = credits + 16384 >= tp->rcv_wnd;

	if (must_send || credits >= thres)
		tp->rcv_wup += send_rx_credits(csk, credits);
}
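
/* recvmsg() path for connections with TLS RX offload enabled: record payloads
 * delivered by the adapter are copied to userspace, the header/body receive
 * state is tracked per record, and RX credits are returned as data is read.
 */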
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			    int nonblock, int flags, int *addr_len)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct net_device *dev = csk->egress_dev;
	struct chtls_hws *hws = &csk->tlshws;
	struct tcp_sock *tp = tcp_sk(sk);
	struct adapter *adap;
	unsigned long avail;
	int buffers_freed;
	int copied = 0;
	int request;
	int target;
	long timeo;

	adap = netdev2adap(dev);
	buffers_freed = 0;

	timeo = sock_rcvtimeo(sk, nonblock);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	request = len;

	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
		chtls_cleanup_rbuf(sk, copied);

	do {
		struct sk_buff *skb;
		u32 offset = 0;

		if (unlikely(tp->urg_data &&
			     tp->urg_seq == tp->copied_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			goto found_ok_skb;
		if (csk->wr_credits &&
		    skb_queue_len(&csk->txq) &&
		    chtls_push_frames(csk, csk->wr_credits ==
				      csk->wr_max_credits))
			sk->sk_write_space(sk);

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    signal_pending(current))
				break;

			if (!timeo)
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}
			if (!timeo) {
				copied = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}
		if (sk->sk_backlog.tail) {
			release_sock(sk);
			lock_sock(sk);
			chtls_cleanup_rbuf(sk, copied);
			continue;
		}

		if (copied >= target)
			break;
		chtls_cleanup_rbuf(sk, copied);
		sk_wait_data(sk, &timeo, NULL);
		continue;
found_ok_skb:
		if (!skb->len) {
			skb_dst_set(skb, NULL);
			__skb_unlink(skb, &sk->sk_receive_queue);
			kfree_skb(skb);

			if (!copied && !timeo) {
				copied = -EAGAIN;
				break;
			}

			if (copied < target) {
				release_sock(sk);
				lock_sock(sk);
				continue;
			}
			break;
		}
		offset = hws->copied_seq;
		avail = skb->len - offset;
		if (len < avail)
			avail = len;

		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - tp->copied_seq;

			if (urg_offset < avail) {
				if (urg_offset) {
					avail = urg_offset;
				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
					/* First byte is urgent, skip */
					tp->copied_seq++;
					offset++;
					avail--;
					if (!avail)
						goto skip_copy;
				}
			}
		}
		if (hws->rstate == TLS_RCV_ST_READ_BODY) {
			if (skb_copy_datagram_msg(skb, offset,
						  msg, avail)) {
				if (!copied) {
					copied = -EFAULT;
					break;
				}
			}
		} else {
			struct tlsrx_cmp_hdr *tls_hdr_pkt =
				(struct tlsrx_cmp_hdr *)skb->data;

			if ((tls_hdr_pkt->res_to_mac_error &
			    TLSRX_HDR_PKT_ERROR_M))
				tls_hdr_pkt->type = 0x7F;

			/* CMP pld len is for recv seq */
			hws->rcvpld = skb->hdr_len;
			if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
				if (!copied) {
					copied = -EFAULT;
					break;
				}
			}
		}
		copied += avail;
		len -= avail;
		hws->copied_seq += avail;
skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
			tp->urg_data = 0;

		if (hws->rstate == TLS_RCV_ST_READ_BODY &&
		    (avail + offset) >= skb->len) {
			if (likely(skb))
				chtls_free_skb(sk, skb);
			buffers_freed++;
			hws->rstate = TLS_RCV_ST_READ_HEADER;
			atomic_inc(&adap->chcr_stats.tls_pdu_rx);
			tp->copied_seq += hws->rcvpld;
			hws->copied_seq = 0;
			if (copied >= target &&
			    !skb_peek(&sk->sk_receive_queue))
				break;
		} else {
			if (likely(skb)) {
				if (ULP_SKB_CB(skb)->flags &
				    ULPCB_FLAG_TLS_ND)
					hws->rstate =
						TLS_RCV_ST_READ_HEADER;
				else
					hws->rstate =
						TLS_RCV_ST_READ_BODY;
				chtls_free_skb(sk, skb);
			}
			buffers_freed++;
			tp->copied_seq += avail;
			hws->copied_seq = 0;
		}
	} while (len > 0);

	if (buffers_freed)
		chtls_cleanup_rbuf(sk, copied);
	release_sock(sk);
	return copied;
}

/*
 * Peek at data in a socket's receive buffer.
 */
static int peekmsg(struct sock *sk, struct msghdr *msg,
		   size_t len, int nonblock, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 peek_seq, offset;
	struct sk_buff *skb;
	int copied = 0;
	size_t avail;          /* amount of available data in current skb */
	long timeo;

	lock_sock(sk);
	timeo = sock_rcvtimeo(sk, nonblock);
	peek_seq = tp->copied_seq;

	do {
		if (unlikely(tp->urg_data && tp->urg_seq == peek_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}

		skb_queue_walk(&sk->sk_receive_queue, skb) {
			offset = peek_seq - ULP_SKB_CB(skb)->seq;
			if (offset < skb->len)
				goto found_ok_skb;
		}

		/* empty receive queue */
		if (copied)
			break;
		if (sock_flag(sk, SOCK_DONE))
			break;
		if (sk->sk_err) {
			copied = sock_error(sk);
			break;
		}
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		if (sk->sk_state == TCP_CLOSE) {
			copied = -ENOTCONN;
			break;
		}
		if (!timeo) {
			copied = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			copied = sock_intr_errno(timeo);
			break;
		}

		if (sk->sk_backlog.tail) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else {
			sk_wait_data(sk, &timeo, NULL);
		}

		if (unlikely(peek_seq != tp->copied_seq)) {
			if (net_ratelimit())
				pr_info("TCP(%s:%d), race in MSG_PEEK.\n",
					current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

found_ok_skb:
		avail = skb->len - offset;
		if (len < avail)
			avail = len;
		/*
		 * Do we have urgent data here?  We need to skip over the
		 * urgent byte.
		 */
		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - peek_seq;

			if (urg_offset < avail) {
				/*
				 * The amount of data we are preparing to copy
				 * contains urgent data.
				 */
				if (!urg_offset) { /* First byte is urgent */
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						peek_seq++;
						offset++;
						avail--;
					}
					if (!avail)
						continue;
				} else {
					/* stop short of the urgent data */
					avail = urg_offset;
				}
			}
		}

		/*
		 * If MSG_TRUNC is specified the data is discarded.
		 */
		if (likely(!(flags & MSG_TRUNC)))
			if (skb_copy_datagram_msg(skb, offset, msg, len)) {
				if (!copied) {
					copied = -EFAULT;
					break;
				}
			}
		peek_seq += avail;
		copied += avail;
		len -= avail;
	} while (len > 0);

	release_sock(sk);
	return copied;
}
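
/* recvmsg() entry point: MSG_OOB falls back to the generic TCP recvmsg,
 * MSG_PEEK is handled by peekmsg(), TLS RX connections go through
 * chtls_pt_recvmsg(), and everything else is copied straight from the
 * offloaded receive queue.
 */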
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct chtls_sock *csk;
	struct chtls_hws *hws;
	unsigned long avail;    /* amount of available data in current skb */
	int buffers_freed;
	int copied = 0;
	int request;
	long timeo;
	int target;             /* Read at least this many bytes */

	buffers_freed = 0;

	if (unlikely(flags & MSG_OOB))
		return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
					addr_len);

	if (unlikely(flags & MSG_PEEK))
		return peekmsg(sk, msg, len, nonblock, flags);

	if (sk_can_busy_loop(sk) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    sk->sk_state == TCP_ESTABLISHED)
		sk_busy_loop(sk, nonblock);

	lock_sock(sk);
	csk = rcu_dereference_sk_user_data(sk);
	hws = &csk->tlshws;

	if (is_tls_rx(csk))
		return chtls_pt_recvmsg(sk, msg, len, nonblock,
					flags, addr_len);

	timeo = sock_rcvtimeo(sk, nonblock);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	request = len;

	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
		chtls_cleanup_rbuf(sk, copied);

	do {
		struct sk_buff *skb;
		u32 offset;

		if (unlikely(tp->urg_data && tp->urg_seq == tp->copied_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			goto found_ok_skb;

		if (csk->wr_credits &&
		    skb_queue_len(&csk->txq) &&
		    chtls_push_frames(csk, csk->wr_credits ==
				      csk->wr_max_credits))
			sk->sk_write_space(sk);

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}
			if (!timeo) {
				copied = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		if (sk->sk_backlog.tail) {
			release_sock(sk);
			lock_sock(sk);
			chtls_cleanup_rbuf(sk, copied);
			continue;
		}

		if (copied >= target)
			break;
		chtls_cleanup_rbuf(sk, copied);
		sk_wait_data(sk, &timeo, NULL);
		continue;

found_ok_skb:
		if (!skb->len) {
			chtls_kfree_skb(sk, skb);
			if (!copied && !timeo) {
				copied = -EAGAIN;
				break;
			}

			if (copied < target)
				continue;

			break;
		}

		offset = tp->copied_seq - ULP_SKB_CB(skb)->seq;
		avail = skb->len - offset;
		if (len < avail)
			avail = len;

		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - tp->copied_seq;

			if (urg_offset < avail) {
				if (urg_offset) {
					avail = urg_offset;
				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
					tp->copied_seq++;
					offset++;
					avail--;
					if (!avail)
						goto skip_copy;
				}
			}
		}

		if (likely(!(flags & MSG_TRUNC))) {
			if (skb_copy_datagram_msg(skb, offset,
						  msg, avail)) {
				if (!copied) {
					copied = -EFAULT;
					break;
				}
			}
		}

		tp->copied_seq += avail;
		copied += avail;
		len -= avail;

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
			tp->urg_data = 0;

		if (avail + offset >= skb->len) {
			if (likely(skb))
				chtls_free_skb(sk, skb);
			buffers_freed++;

			if (copied >= target &&
			    !skb_peek(&sk->sk_receive_queue))
				break;
		}
	} while (len > 0);

	if (buffers_freed)
		chtls_cleanup_rbuf(sk, copied);
	release_sock(sk);
	return copied;
}