chtls_io.c

/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#include <crypto/aes.h>

#include "chtls.h"
#include "chtls_cm.h"

static bool is_tls_tx(struct chtls_sock *csk)
{
        return csk->tlshws.txkey >= 0;
}

static bool is_tls_rx(struct chtls_sock *csk)
{
        return csk->tlshws.rxkey >= 0;
}

static int data_sgl_len(const struct sk_buff *skb)
{
        unsigned int cnt;

        cnt = skb_shinfo(skb)->nr_frags;
        return sgl_len(cnt) * 8;
}

static int nos_ivs(struct sock *sk, unsigned int size)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

        return DIV_ROUND_UP(size, csk->tlshws.mfs);
}

static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
{
        int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
        int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);

        if ((hlen + KEY_ON_MEM_SZ + ivs_size) <
            MAX_IMM_OFLD_TX_DATA_WR_LEN) {
                ULP_SKB_CB(skb)->ulp.tls.iv = 1;
                return 1;
        }
        ULP_SKB_CB(skb)->ulp.tls.iv = 0;
        return 0;
}

static int max_ivs_size(struct sock *sk, int size)
{
        return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
}

static int ivs_size(struct sock *sk, const struct sk_buff *skb)
{
        return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
                CIPHER_BLOCK_SIZE) : 0;
}
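
/*
 * A FlowC work request is sized in units of 16 bytes ("credits").
 * flowc_wr_credits() rounds the header-plus-mnemonics length up to a
 * whole number of such units and optionally returns the padded byte
 * length.  For example, assuming the usual 8-byte fw_flowc_wr header
 * and 8-byte fw_flowc_mnemval entries, nine parameters give
 * 8 + 9 * 8 = 80 bytes, i.e. DIV_ROUND_UP(80, 16) = 5 credits.
 */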
static int flowc_wr_credits(int nparams, int *flowclenp)
{
        int flowclen16, flowclen;

        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;

        if (flowclenp)
                *flowclenp = flowclen;

        return flowclen16;
}

static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
                                           struct fw_flowc_wr *flowc,
                                           int flowclen)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct sk_buff *skb;

        skb = alloc_skb(flowclen, GFP_ATOMIC);
        if (!skb)
                return NULL;

        memcpy(__skb_put(skb, flowclen), flowc, flowclen);
        skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);

        return skb;
}

static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
                         int flowclen)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        int flowclen16;
        int ret;

        flowclen16 = flowclen / 16;

        if (csk_flag(sk, CSK_TX_DATA_SENT)) {
                skb = create_flowc_wr_skb(sk, flowc, flowclen);
                if (!skb)
                        return -ENOMEM;

                skb_entail(sk, skb,
                           ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
                return 0;
        }

        ret = cxgb4_immdata_send(csk->egress_dev,
                                 csk->txq_idx,
                                 flowc, flowclen);
        if (!ret)
                return flowclen16;

        skb = create_flowc_wr_skb(sk, flowc, flowclen);
        if (!skb)
                return -ENOMEM;

        send_or_defer(sk, tp, skb, 0);
        return flowclen16;
}

static u8 tcp_state_to_flowc_state(u8 state)
{
        switch (state) {
        case TCP_ESTABLISHED:
                return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
        case TCP_CLOSE_WAIT:
                return FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
        case TCP_FIN_WAIT1:
                return FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
        case TCP_CLOSING:
                return FW_FLOWC_MNEM_TCPSTATE_CLOSING;
        case TCP_LAST_ACK:
                return FW_FLOWC_MNEM_TCPSTATE_LASTACK;
        case TCP_FIN_WAIT2:
                return FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;
        }

        return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
}
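
/*
 * Build and send the FlowC work request that describes this connection
 * to the adapter: PF, TX channel and port, ingress queue, current
 * send/receive sequence numbers, send buffer size, MSS and TCP state.
 * The TLS ULP mode and maximum TX_DATA payload length are appended only
 * when the connection runs in TLS offload mode, so the parameter count
 * (and therefore the WR length) is variable.
 */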
int send_tx_flowc_wr(struct sock *sk, int compl,
                     u32 snd_nxt, u32 rcv_nxt)
{
        struct flowc_packed {
                struct fw_flowc_wr fc;
                struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
        } __packed sflowc;
        int nparams, paramidx, flowclen16, flowclen;
        struct fw_flowc_wr *flowc;
        struct chtls_sock *csk;
        struct tcp_sock *tp;

        csk = rcu_dereference_sk_user_data(sk);
        tp = tcp_sk(sk);
        memset(&sflowc, 0, sizeof(sflowc));
        flowc = &sflowc.fc;

#define FLOWC_PARAM(__m, __v) \
        do { \
                flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \
                flowc->mnemval[paramidx].val = cpu_to_be32(__v); \
                paramidx++; \
        } while (0)

        paramidx = 0;

        FLOWC_PARAM(PFNVFN, FW_PFVF_CMD_PFN_V(csk->cdev->lldi->pf));
        FLOWC_PARAM(CH, csk->tx_chan);
        FLOWC_PARAM(PORT, csk->tx_chan);
        FLOWC_PARAM(IQID, csk->rss_qid);
        FLOWC_PARAM(SNDNXT, tp->snd_nxt);
        FLOWC_PARAM(RCVNXT, tp->rcv_nxt);
        FLOWC_PARAM(SNDBUF, csk->sndbuf);
        FLOWC_PARAM(MSS, tp->mss_cache);
        FLOWC_PARAM(TCPSTATE, tcp_state_to_flowc_state(sk->sk_state));

        if (SND_WSCALE(tp))
                FLOWC_PARAM(RCV_SCALE, SND_WSCALE(tp));

        if (csk->ulp_mode == ULP_MODE_TLS)
                FLOWC_PARAM(ULD_MODE, ULP_MODE_TLS);

        if (csk->tlshws.fcplenmax)
                FLOWC_PARAM(TXDATAPLEN_MAX, csk->tlshws.fcplenmax);

        nparams = paramidx;
#undef FLOWC_PARAM

        flowclen16 = flowc_wr_credits(nparams, &flowclen);
        flowc->op_to_nparams =
                cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
                            FW_WR_COMPL_V(compl) |
                            FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
                                          FW_WR_FLOWID_V(csk->tid));

        return send_flowc_wr(sk, flowc, flowclen);
}
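
/*
 * Per-record IVs are generated on the host.  When they fit within the
 * immediate-data budget of the work request (set_ivs_imm() decided this
 * earlier and recorded it in the skb's ULP control block) they are
 * pushed into the WR headroom; otherwise they are placed in a freshly
 * allocated page and attached as an extra DSGL fragment, for which a
 * frag slot was already reserved when the WR was sized.
 */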
/* Copy IVs to WR */
static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb)
{
        struct chtls_sock *csk;
        unsigned char *iv_loc;
        struct chtls_hws *hws;
        unsigned char *ivs;
        u16 number_of_ivs;
        struct page *page;
        int err = 0;

        csk = rcu_dereference_sk_user_data(sk);
        hws = &csk->tlshws;
        number_of_ivs = nos_ivs(sk, skb->len);

        if (number_of_ivs > MAX_IVS_PAGE) {
                pr_warn("MAX IVs in PAGE exceeded %d\n", number_of_ivs);
                return -ENOMEM;
        }

        /* generate the IVs */
        ivs = kmalloc_array(CIPHER_BLOCK_SIZE, number_of_ivs, GFP_ATOMIC);
        if (!ivs)
                return -ENOMEM;
        get_random_bytes(ivs, number_of_ivs * CIPHER_BLOCK_SIZE);

        if (skb_ulp_tls_iv_imm(skb)) {
                /* send the IVs as immediate data in the WR */
                iv_loc = (unsigned char *)__skb_push(skb, number_of_ivs *
                                                     CIPHER_BLOCK_SIZE);
                if (iv_loc)
                        memcpy(iv_loc, ivs, number_of_ivs * CIPHER_BLOCK_SIZE);

                hws->ivsize = number_of_ivs * CIPHER_BLOCK_SIZE;
        } else {
                /* Send the IVs as sgls */
                /* Already accounted IV DSGL for credits */
                skb_shinfo(skb)->nr_frags--;
                page = alloc_pages(sk->sk_allocation | __GFP_COMP, 0);
                if (!page) {
                        pr_info("%s : Page allocation for IVs failed\n",
                                __func__);
                        err = -ENOMEM;
                        goto out;
                }
                memcpy(page_address(page), ivs, number_of_ivs *
                       CIPHER_BLOCK_SIZE);
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0,
                                   number_of_ivs * CIPHER_BLOCK_SIZE);
                hws->ivsize = 0;
        }
out:
        kfree(ivs);
        return err;
}

/* Copy Key to WR */
static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb)
{
        struct ulptx_sc_memrd *sc_memrd;
        struct chtls_sock *csk;
        struct chtls_dev *cdev;
        struct ulptx_idata *sc;
        struct chtls_hws *hws;
        u32 immdlen;
        int kaddr;

        csk = rcu_dereference_sk_user_data(sk);
        hws = &csk->tlshws;
        cdev = csk->cdev;

        immdlen = sizeof(*sc) + sizeof(*sc_memrd);
        kaddr = keyid_to_addr(cdev->kmap.start, hws->txkey);
        sc = (struct ulptx_idata *)__skb_push(skb, immdlen);
        if (sc) {
                sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
                sc->len = htonl(0);
                sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
                sc_memrd->cmd_to_len =
                        htonl(ULPTX_CMD_V(ULP_TX_SC_MEMRD) |
                              ULP_TX_SC_MORE_V(1) |
                              ULPTX_LEN16_V(hws->keylen >> 4));
                sc_memrd->addr = htonl(kaddr);
        }
}

static u64 tlstx_incr_seqnum(struct chtls_hws *hws)
{
        return hws->tx_seq_no++;
}

static bool is_sg_request(const struct sk_buff *skb)
{
        return skb->peeked ||
               (skb->len > MAX_IMM_ULPTX_WR_LEN);
}

/*
 * Returns true if an sk_buff carries urgent data.
 */
static bool skb_urgent(struct sk_buff *skb)
{
        return ULP_SKB_CB(skb)->flags & ULPCB_FLAG_URG;
}

/* TLS content type for CPL SFO */
static unsigned char tls_content_type(unsigned char content_type)
{
        switch (content_type) {
        case TLS_HDR_TYPE_CCS:
                return CPL_TX_TLS_SFO_TYPE_CCS;
        case TLS_HDR_TYPE_ALERT:
                return CPL_TX_TLS_SFO_TYPE_ALERT;
        case TLS_HDR_TYPE_HANDSHAKE:
                return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
        case TLS_HDR_TYPE_HEARTBEAT:
                return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
        }
        return CPL_TX_TLS_SFO_TYPE_DATA;
}
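
/*
 * Fill in the FW_TLSTX_DATA_WR work request and the CPL_TX_TLS_SFO CPL
 * that precede the record payload.  The WR carries the flow id, credit
 * count, key location (on-adapter memory here), IV placement and the
 * precomputed expansion/PDU limits; the CPL carries the record content
 * type, segment length and the crypto "scmd" words, including the
 * per-connection TX sequence number used for the record nonce.
 */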
static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
                           int dlen, int tls_immd, u32 credits,
                           int expn, int pdus)
{
        struct fw_tlstx_data_wr *req_wr;
        struct cpl_tx_tls_sfo *req_cpl;
        unsigned int wr_ulp_mode_force;
        struct tls_scmd *updated_scmd;
        unsigned char data_type;
        struct chtls_sock *csk;
        struct net_device *dev;
        struct chtls_hws *hws;
        struct tls_scmd *scmd;
        struct adapter *adap;
        unsigned char *req;
        int immd_len;
        int iv_imm;
        int len;

        csk = rcu_dereference_sk_user_data(sk);
        iv_imm = skb_ulp_tls_iv_imm(skb);
        dev = csk->egress_dev;
        adap = netdev2adap(dev);
        hws = &csk->tlshws;
        scmd = &hws->scmd;
        len = dlen + expn;

        dlen = (dlen < hws->mfs) ? dlen : hws->mfs;
        atomic_inc(&adap->chcr_stats.tls_pdu_tx);

        updated_scmd = scmd;
        updated_scmd->seqno_numivs &= 0xffffff80;
        updated_scmd->seqno_numivs |= SCMD_NUM_IVS_V(pdus);
        hws->scmd = *updated_scmd;

        req = (unsigned char *)__skb_push(skb, sizeof(struct cpl_tx_tls_sfo));
        req_cpl = (struct cpl_tx_tls_sfo *)req;
        req = (unsigned char *)__skb_push(skb,
                                          (sizeof(struct fw_tlstx_data_wr)));

        req_wr = (struct fw_tlstx_data_wr *)req;
        immd_len = (tls_immd ? dlen : 0);
        req_wr->op_to_immdlen =
                htonl(FW_WR_OP_V(FW_TLSTX_DATA_WR) |
                      FW_TLSTX_DATA_WR_COMPL_V(1) |
                      FW_TLSTX_DATA_WR_IMMDLEN_V(immd_len));
        req_wr->flowid_len16 = htonl(FW_TLSTX_DATA_WR_FLOWID_V(csk->tid) |
                                     FW_TLSTX_DATA_WR_LEN16_V(credits));
        wr_ulp_mode_force = TX_ULP_MODE_V(ULP_MODE_TLS);

        if (is_sg_request(skb))
                wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
                        ((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
                         FW_OFLD_TX_DATA_WR_SHOVE_F);

        req_wr->lsodisable_to_flags =
                htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
                      FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
                      T6_TX_FORCE_F | wr_ulp_mode_force |
                      TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
                                 skb_queue_empty(&csk->txq)));

        req_wr->ctxloc_to_exp =
                htonl(FW_TLSTX_DATA_WR_NUMIVS_V(pdus) |
                      FW_TLSTX_DATA_WR_EXP_V(expn) |
                      FW_TLSTX_DATA_WR_CTXLOC_V(CHTLS_KEY_CONTEXT_DDR) |
                      FW_TLSTX_DATA_WR_IVDSGL_V(!iv_imm) |
                      FW_TLSTX_DATA_WR_KEYSIZE_V(hws->keylen >> 4));

        /* Fill in the length */
        req_wr->plen = htonl(len);
        req_wr->mfs = htons(hws->mfs);
        req_wr->adjustedplen_pkd =
                htons(FW_TLSTX_DATA_WR_ADJUSTEDPLEN_V(hws->adjustlen));
        req_wr->expinplenmax_pkd =
                htons(FW_TLSTX_DATA_WR_EXPINPLENMAX_V(hws->expansion));
        req_wr->pdusinplenmax_pkd =
                FW_TLSTX_DATA_WR_PDUSINPLENMAX_V(hws->pdus);
        req_wr->r10 = 0;

        data_type = tls_content_type(ULP_SKB_CB(skb)->ulp.tls.type);
        req_cpl->op_to_seg_len = htonl(CPL_TX_TLS_SFO_OPCODE_V(CPL_TX_TLS_SFO) |
                                       CPL_TX_TLS_SFO_DATA_TYPE_V(data_type) |
                                       CPL_TX_TLS_SFO_CPL_LEN_V(2) |
                                       CPL_TX_TLS_SFO_SEG_LEN_V(dlen));
        req_cpl->pld_len = htonl(len - expn);

        req_cpl->type_protover = htonl(CPL_TX_TLS_SFO_TYPE_V
                ((data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT) ?
                TLS_HDR_TYPE_HEARTBEAT : 0) |
                CPL_TX_TLS_SFO_PROTOVER_V(0));

        /* create the s-command */
        req_cpl->r1_lo = 0;
        req_cpl->seqno_numivs = cpu_to_be32(hws->scmd.seqno_numivs);
        req_cpl->ivgen_hdrlen = cpu_to_be32(hws->scmd.ivgen_hdrlen);
        req_cpl->scmd1 = cpu_to_be64(tlstx_incr_seqnum(hws));
}
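
/*
 * Each TLS record the hardware emits grows by a fixed per-PDU overhead:
 * the record header, the explicit AEAD nonce and the authentication
 * tag.  With the usual 5-byte TLS header, 8-byte explicit nonce and
 * 16-byte GCM tag that is 29 bytes per PDU, so a payload spanning three
 * MFS-sized fragments expands by roughly 3 * 29 bytes.  The helper
 * below computes this, either for one send or (when "fullpdu" is set)
 * for the maximum number of PDUs a single work request may carry,
 * which the driver caps at 32.
 */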
/*
 * Calculate the TLS data expansion size
 */
static int chtls_expansion_size(struct sock *sk, int data_len,
                                int fullpdu,
                                unsigned short *pducnt)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct chtls_hws *hws = &csk->tlshws;
        struct tls_scmd *scmd = &hws->scmd;
        int fragsize = hws->mfs;
        int expnsize = 0;
        int fragleft;
        int fragcnt;
        int expppdu;

        if (SCMD_CIPH_MODE_G(scmd->seqno_numivs) ==
            SCMD_CIPH_MODE_AES_GCM) {
                expppdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
                          TLS_HEADER_LENGTH;

                if (fullpdu) {
                        *pducnt = data_len / (expppdu + fragsize);

                        if (*pducnt > 32)
                                *pducnt = 32;
                        else if (!*pducnt)
                                *pducnt = 1;
                        expnsize = (*pducnt) * expppdu;
                        return expnsize;
                }
                fragcnt = (data_len / fragsize);
                expnsize = fragcnt * expppdu;
                fragleft = data_len % fragsize;
                if (fragleft > 0)
                        expnsize += expppdu;
        }
        return expnsize;
}

/* WR with IV, KEY and CPL SFO added */
static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb,
                               int tls_tx_imm, int tls_len, u32 credits)
{
        unsigned short pdus_per_ulp = 0;
        struct chtls_sock *csk;
        struct chtls_hws *hws;
        int expn_sz;
        int pdus;

        csk = rcu_dereference_sk_user_data(sk);
        hws = &csk->tlshws;
        pdus = DIV_ROUND_UP(tls_len, hws->mfs);
        expn_sz = chtls_expansion_size(sk, tls_len, 0, NULL);
        if (!hws->compute) {
                hws->expansion = chtls_expansion_size(sk,
                                                      hws->fcplenmax,
                                                      1, &pdus_per_ulp);
                hws->pdus = pdus_per_ulp;
                hws->adjustlen = hws->pdus *
                        ((hws->expansion / hws->pdus) + hws->mfs);
                hws->compute = 1;
        }
        if (tls_copy_ivs(sk, skb))
                return;
        tls_copy_tx_key(sk, skb);
        tls_tx_data_wr(sk, skb, tls_len, tls_tx_imm, credits, expn_sz, pdus);
        hws->tx_seq_no += (pdus - 1);
}

static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
                            unsigned int immdlen, int len,
                            u32 credits, u32 compl)
{
        struct fw_ofld_tx_data_wr *req;
        unsigned int wr_ulp_mode_force;
        struct chtls_sock *csk;
        unsigned int opcode;

        csk = rcu_dereference_sk_user_data(sk);
        opcode = FW_OFLD_TX_DATA_WR;

        req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
        req->op_to_immdlen = htonl(WR_OP_V(opcode) |
                                   FW_WR_COMPL_V(compl) |
                                   FW_WR_IMMDLEN_V(immdlen));
        req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
                                  FW_WR_LEN16_V(credits));

        wr_ulp_mode_force = TX_ULP_MODE_V(csk->ulp_mode);
        if (is_sg_request(skb))
                wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
                        ((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
                         FW_OFLD_TX_DATA_WR_SHOVE_F);

        req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
                        FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
                        FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
                                (sk, CSK_TX_MORE_DATA)) &&
                                 skb_queue_empty(&csk->txq)));
        req->plen = htonl(len);
}

static int chtls_wr_size(struct chtls_sock *csk, const struct sk_buff *skb,
                         bool size)
{
        int wr_size;

        wr_size = TLS_WR_CPL_LEN;
        wr_size += KEY_ON_MEM_SZ;
        wr_size += ivs_size(csk->sk, skb);

        if (size)
                return wr_size;

        /* frags counted for IV dsgl */
        if (!skb_ulp_tls_iv_imm(skb))
                skb_shinfo(skb)->nr_frags++;

        return wr_size;
}
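
/*
 * Decide whether an skb can be sent entirely as immediate data inside
 * the work request.  Skbs flagged through skb->peeked, or larger than
 * MAX_IMM_ULPTX_WR_LEN, always go out via DSGL.  Otherwise the payload
 * plus the WR header (and, for inline TLS, the CPL/key/IV overhead
 * reported by chtls_wr_size()) must fit in MAX_IMM_OFLD_TX_DATA_WR_LEN.
 */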
static bool is_ofld_imm(struct chtls_sock *csk, const struct sk_buff *skb)
{
        int length = skb->len;

        if (skb->peeked || skb->len > MAX_IMM_ULPTX_WR_LEN)
                return false;

        if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
                /* Check TLS header len for Immediate */
                if (csk->ulp_mode == ULP_MODE_TLS &&
                    skb_ulp_tls_inline(skb))
                        length += chtls_wr_size(csk, skb, true);
                else
                        length += sizeof(struct fw_ofld_tx_data_wr);

                return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
        }
        return true;
}

static unsigned int calc_tx_flits(const struct sk_buff *skb,
                                  unsigned int immdlen)
{
        unsigned int flits, cnt;

        flits = immdlen / 8;            /* headers */
        cnt = skb_shinfo(skb)->nr_frags;
        if (skb_tail_pointer(skb) != skb_transport_header(skb))
                cnt++;
        return flits + sgl_len(cnt);
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
        kfree_skb(skb);
}
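
/*
 * Drain the per-connection TX queue into the hardware.  For every skb
 * the loop converts its size into 16-byte credits (via 8-byte flits for
 * DSGL sends), makes sure a FlowC WR has gone out first, prepends the
 * plain or TLS TX_DATA work request, records the credit cost in
 * skb->csum for later completion accounting, and hands the skb to the
 * L2T transmit path.  It stops when credits run out or when the head of
 * the queue is being held back for more data.
 */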
int chtls_push_frames(struct chtls_sock *csk, int comp)
{
        struct chtls_hws *hws = &csk->tlshws;
        struct tcp_sock *tp;
        struct sk_buff *skb;
        int total_size = 0;
        struct sock *sk;
        int wr_size;

        wr_size = sizeof(struct fw_ofld_tx_data_wr);
        sk = csk->sk;
        tp = tcp_sk(sk);

        if (unlikely(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE)))
                return 0;

        if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
                return 0;

        while (csk->wr_credits && (skb = skb_peek(&csk->txq)) &&
               (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_HOLD) ||
                skb_queue_len(&csk->txq) > 1)) {
                unsigned int credit_len = skb->len;
                unsigned int credits_needed;
                unsigned int completion = 0;
                int tls_len = skb->len; /* TLS data len before IV/key */
                unsigned int immdlen;
                int len = skb->len;     /* length [ulp bytes] inserted by hw */
                int flowclen16 = 0;
                int tls_tx_imm = 0;

                immdlen = skb->len;
                if (!is_ofld_imm(csk, skb)) {
                        immdlen = skb_transport_offset(skb);
                        if (skb_ulp_tls_inline(skb))
                                wr_size = chtls_wr_size(csk, skb, false);
                        credit_len = 8 * calc_tx_flits(skb, immdlen);
                } else {
                        if (skb_ulp_tls_inline(skb)) {
                                wr_size = chtls_wr_size(csk, skb, false);
                                tls_tx_imm = 1;
                        }
                }
                if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR))
                        credit_len += wr_size;
                credits_needed = DIV_ROUND_UP(credit_len, 16);
                if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
                        flowclen16 = send_tx_flowc_wr(sk, 1, tp->snd_nxt,
                                                      tp->rcv_nxt);
                        if (flowclen16 <= 0)
                                break;
                        csk->wr_credits -= flowclen16;
                        csk->wr_unacked += flowclen16;
                        csk->wr_nondata += flowclen16;
                        csk_set_flag(csk, CSK_TX_DATA_SENT);
                }
                if (csk->wr_credits < credits_needed) {
                        if (skb_ulp_tls_inline(skb) &&
                            !skb_ulp_tls_iv_imm(skb))
                                skb_shinfo(skb)->nr_frags--;
                        break;
                }
                __skb_unlink(skb, &csk->txq);
                skb_set_queue_mapping(skb, (csk->txq_idx << 1) |
                                      CPL_PRIORITY_DATA);
                if (hws->ofld)
                        hws->txqid = (skb->queue_mapping >> 1);
                skb->csum = (__force __wsum)(credits_needed + csk->wr_nondata);
                csk->wr_credits -= credits_needed;
                csk->wr_unacked += credits_needed;
                csk->wr_nondata = 0;
                enqueue_wr(csk, skb);

                if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
                        if ((comp && csk->wr_unacked == credits_needed) ||
                            (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) ||
                            csk->wr_unacked >= csk->wr_max_credits / 2) {
                                completion = 1;
                                csk->wr_unacked = 0;
                        }
                        if (skb_ulp_tls_inline(skb))
                                make_tlstx_data_wr(sk, skb, tls_tx_imm,
                                                   tls_len, credits_needed);
                        else
                                make_tx_data_wr(sk, skb, immdlen, len,
                                                credits_needed, completion);
                        tp->snd_nxt += len;
                        tp->lsndtime = tcp_time_stamp(tp);
                        if (completion)
                                ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
                } else {
                        struct cpl_close_con_req *req = cplhdr(skb);
                        unsigned int cmd = CPL_OPCODE_G(ntohl
                                            (OPCODE_TID(req)));

                        if (cmd == CPL_CLOSE_CON_REQ)
                                csk_set_flag(csk,
                                             CSK_CLOSE_CON_REQUESTED);

                        if ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) &&
                            (csk->wr_unacked >= csk->wr_max_credits / 2)) {
                                req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
                                csk->wr_unacked = 0;
                        }
                }
                total_size += skb->truesize;
                if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_BARRIER)
                        csk_set_flag(csk, CSK_TX_WAIT_IDLE);
                t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
                cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
        }
        sk->sk_wmem_queued -= total_size;
        return total_size;
}

static void mark_urg(struct tcp_sock *tp, int flags,
                     struct sk_buff *skb)
{
        if (unlikely(flags & MSG_OOB)) {
                tp->snd_up = tp->write_seq;
                ULP_SKB_CB(skb)->flags = ULPCB_FLAG_URG |
                                         ULPCB_FLAG_BARRIER |
                                         ULPCB_FLAG_NO_APPEND |
                                         ULPCB_FLAG_NEED_HDR;
        }
}

/*
 * Returns true if a connection should send more data to the TCP engine.
 */
static bool should_push(struct sock *sk)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct chtls_dev *cdev = csk->cdev;
        struct tcp_sock *tp = tcp_sk(sk);

        /*
         * If we've released our offload resources there's nothing to do ...
         */
        if (!cdev)
                return false;

        /*
         * If there aren't any work requests in flight, or there isn't enough
         * data in flight, or Nagle is off, then send the current TX_DATA;
         * otherwise hold it and wait to accumulate more data.
         */
        return csk->wr_credits == csk->wr_max_credits ||
                (tp->nonagle & TCP_NAGLE_OFF);
}

/*
 * Returns true if a TCP socket is corked.
 */
static bool corked(const struct tcp_sock *tp, int flags)
{
        return (flags & MSG_MORE) || (tp->nonagle & TCP_NAGLE_CORK);
}

/*
 * Returns true if a send should try to push new data.
 */
static bool send_should_push(struct sock *sk, int flags)
{
        return should_push(sk) && !corked(tcp_sk(sk), flags);
}

void chtls_tcp_push(struct sock *sk, int flags)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        int qlen = skb_queue_len(&csk->txq);

        if (likely(qlen)) {
                struct sk_buff *skb = skb_peek_tail(&csk->txq);
                struct tcp_sock *tp = tcp_sk(sk);

                mark_urg(tp, flags, skb);

                if (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) &&
                    corked(tp, flags)) {
                        ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_HOLD;
                        return;
                }

                ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_HOLD;
                if (qlen == 1 &&
                    ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
                     should_push(sk)))
                        chtls_push_frames(csk, 1);
        }
}

/*
 * Calculate the size for a new send sk_buff.  It's the maximum size so we
 * can pack lots of data into it, unless we plan to send it immediately, in
 * which case we size it more tightly.
 *
 * Note: we don't bother compensating for MSS < PAGE_SIZE because it doesn't
 * arise in normal cases and when it does we are just wasting memory.
 */
static int select_size(struct sock *sk, int io_len, int flags, int len)
{
        const int pgbreak = SKB_MAX_HEAD(len);

        /*
         * If the data wouldn't fit in the main body anyway, put only the
         * header in the main body so it can use immediate data and place all
         * the payload in page fragments.
         */
        if (io_len > pgbreak)
                return 0;

        /*
         * If we will be accumulating payload get a large main body.
         */
        if (!send_should_push(sk, flags))
                return pgbreak;

        return io_len;
}

void skb_entail(struct sock *sk, struct sk_buff *skb, int flags)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        ULP_SKB_CB(skb)->seq = tp->write_seq;
        ULP_SKB_CB(skb)->flags = flags;
        __skb_queue_tail(&csk->txq, skb);
        sk->sk_wmem_queued += skb->truesize;

        if (TCP_PAGE(sk) && TCP_OFF(sk)) {
                put_page(TCP_PAGE(sk));
                TCP_PAGE(sk) = NULL;
                TCP_OFF(sk) = 0;
        }
}
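
/*
 * TX skbs are allocated with enough headroom for everything that will
 * later be pushed in front of the payload: TX_HEADER_LEN for a plain
 * offload send, or TX_TLSHDR_LEN plus the on-memory key command and the
 * worst-case IV block for an inline TLS record.  get_record_skb() can
 * also be asked for a zero-copy variant that carries no linear payload
 * at all, which the sendpage path uses.
 */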
static struct sk_buff *get_tx_skb(struct sock *sk, int size)
{
        struct sk_buff *skb;

        skb = alloc_skb(size + TX_HEADER_LEN, sk->sk_allocation);
        if (likely(skb)) {
                skb_reserve(skb, TX_HEADER_LEN);
                skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
                skb_reset_transport_header(skb);
        }
        return skb;
}

static struct sk_buff *get_record_skb(struct sock *sk, int size, bool zcopy)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct sk_buff *skb;

        skb = alloc_skb(((zcopy ? 0 : size) + TX_TLSHDR_LEN +
                        KEY_ON_MEM_SZ + max_ivs_size(sk, size)),
                        sk->sk_allocation);
        if (likely(skb)) {
                skb_reserve(skb, (TX_TLSHDR_LEN +
                            KEY_ON_MEM_SZ + max_ivs_size(sk, size)));
                skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
                skb_reset_transport_header(skb);
                ULP_SKB_CB(skb)->ulp.tls.ofld = 1;
                ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type;
        }
        return skb;
}

static void tx_skb_finalize(struct sk_buff *skb)
{
        struct ulp_skb_cb *cb = ULP_SKB_CB(skb);

        if (!(cb->flags & ULPCB_FLAG_NO_HDR))
                cb->flags = ULPCB_FLAG_NEED_HDR;
        cb->flags |= ULPCB_FLAG_NO_APPEND;
}

static void push_frames_if_head(struct sock *sk)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

        if (skb_queue_len(&csk->txq) == 1)
                chtls_push_frames(csk, 1);
}

static int chtls_skb_copy_to_page_nocache(struct sock *sk,
                                          struct iov_iter *from,
                                          struct sk_buff *skb,
                                          struct page *page,
                                          int off, int copy)
{
        int err;

        err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) +
                                       off, copy, skb->len);
        if (err)
                return err;

        skb->len += copy;
        skb->data_len += copy;
        skb->truesize += copy;
        sk->sk_wmem_queued += copy;
        return 0;
}

/* Read TLS header to find content type and data length */
static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
{
        if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr))
                return -EFAULT;
        return (__force int)cpu_to_be16(thdr->length);
}

static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
{
        return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
}
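
/*
 * Sleep until send-buffer space (accounted against the adapter-wide
 * max_host_sndbuf limit) becomes available, the socket errors out, the
 * timeout expires or a signal arrives.  This mirrors the stock
 * sk_stream_wait_memory() logic, including the small randomized
 * back-off applied when space is nominally free but we still wait.
 */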
static int csk_wait_memory(struct chtls_dev *cdev,
                           struct sock *sk, long *timeo_p)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int sndbuf, err = 0;
        long current_timeo;
        long vm_wait = 0;
        bool noblock;

        current_timeo = *timeo_p;
        noblock = (*timeo_p ? false : true);
        sndbuf = cdev->max_host_sndbuf;
        if (csk_mem_free(cdev, sk)) {
                current_timeo = (prandom_u32() % (HZ / 5)) + 2;
                vm_wait = (prandom_u32() % (HZ / 5)) + 2;
        }

        add_wait_queue(sk_sleep(sk), &wait);
        while (1) {
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

                if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                        goto do_error;
                if (!*timeo_p) {
                        if (noblock)
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        goto do_nonblock;
                }
                if (signal_pending(current))
                        goto do_interrupted;
                sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                if (csk_mem_free(cdev, sk) && !vm_wait)
                        break;

                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                sk->sk_write_pending++;
                sk_wait_event(sk, &current_timeo, sk->sk_err ||
                              (sk->sk_shutdown & SEND_SHUTDOWN) ||
                              (csk_mem_free(cdev, sk) && !vm_wait), &wait);
                sk->sk_write_pending--;

                if (vm_wait) {
                        vm_wait -= current_timeo;
                        current_timeo = *timeo_p;
                        if (current_timeo != MAX_SCHEDULE_TIMEOUT) {
                                current_timeo -= vm_wait;
                                if (current_timeo < 0)
                                        current_timeo = 0;
                        }
                        vm_wait = 0;
                }
                *timeo_p = current_timeo;
        }
do_rm_wq:
        remove_wait_queue(sk_sleep(sk), &wait);
        return err;
do_error:
        err = -EPIPE;
        goto do_rm_wq;
do_nonblock:
        err = -EAGAIN;
        goto do_rm_wq;
do_interrupted:
        err = sock_intr_errno(*timeo_p);
        goto do_rm_wq;
}
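
/*
 * sendmsg() for offloaded sockets.  Plain data is packed MSS-sized into
 * queued skbs.  When the connection is in TLS TX mode the sender passes
 * down complete TLS records: at the start of each record the 5-byte
 * header is read straight from the iterator (tls_header_read()) to
 * learn the content type and payload length, and tlshws.txleft then
 * limits how much may be copied into the current record skb.
 */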
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct chtls_dev *cdev = csk->cdev;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        int mss, flags, err;
        int recordsz = 0;
        int copied = 0;
        int hdrlen = 0;
        long timeo;

        lock_sock(sk);
        flags = msg->msg_flags;
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
                err = sk_stream_wait_connect(sk, &timeo);
                if (err)
                        goto out_err;
        }

        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
        err = -EPIPE;
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto out_err;

        mss = csk->mss;
        csk_set_flag(csk, CSK_TX_MORE_DATA);

        while (msg_data_left(msg)) {
                int copy = 0;

                skb = skb_peek_tail(&csk->txq);
                if (skb) {
                        copy = mss - skb->len;
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
                if (!csk_mem_free(cdev, sk))
                        goto wait_for_sndbuf;

                if (is_tls_tx(csk) && !csk->tlshws.txleft) {
                        struct tls_hdr hdr;

                        recordsz = tls_header_read(&hdr, &msg->msg_iter);
                        size -= TLS_HEADER_LENGTH;
                        hdrlen += TLS_HEADER_LENGTH;
                        csk->tlshws.txleft = recordsz;
                        csk->tlshws.type = hdr.type;
                        if (skb)
                                ULP_SKB_CB(skb)->ulp.tls.type = hdr.type;
                }

                if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
                    copy <= 0) {
new_buf:
                        if (skb) {
                                tx_skb_finalize(skb);
                                push_frames_if_head(sk);
                        }

                        if (is_tls_tx(csk)) {
                                skb = get_record_skb(sk,
                                                     select_size(sk,
                                                                 recordsz,
                                                                 flags,
                                                                 TX_TLSHDR_LEN),
                                                     false);
                        } else {
                                skb = get_tx_skb(sk,
                                                 select_size(sk, size, flags,
                                                             TX_HEADER_LEN));
                        }
                        if (unlikely(!skb))
                                goto wait_for_memory;

                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        copy = mss;
                }
                if (copy > size)
                        copy = size;

                if (skb_tailroom(skb) > 0) {
                        copy = min(copy, skb_tailroom(skb));
                        if (is_tls_tx(csk))
                                copy = min_t(int, copy, csk->tlshws.txleft);
                        err = skb_add_data_nocache(sk, skb,
                                                   &msg->msg_iter, copy);
                        if (err)
                                goto do_fault;
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        struct page *page = TCP_PAGE(sk);
                        int pg_size = PAGE_SIZE;
                        int off = TCP_OFF(sk);
                        bool merge;

                        if (!page)
                                goto wait_for_memory;

                        pg_size <<= compound_order(page);
                        if (off < pg_size &&
                            skb_can_coalesce(skb, i, page, off)) {
                                merge = 1;
                                goto copy;
                        }
                        merge = 0;
                        if (i == (is_tls_tx(csk) ? (MAX_SKB_FRAGS - 1) :
                            MAX_SKB_FRAGS))
                                goto new_buf;

                        if (page && off == pg_size) {
                                put_page(page);
                                TCP_PAGE(sk) = page = NULL;
                                pg_size = PAGE_SIZE;
                        }

                        if (!page) {
                                gfp_t gfp = sk->sk_allocation;
                                int order = cdev->send_page_order;

                                if (order) {
                                        page = alloc_pages(gfp | __GFP_COMP |
                                                           __GFP_NOWARN |
                                                           __GFP_NORETRY,
                                                           order);
                                        if (page)
                                                pg_size <<=
                                                        compound_order(page);
                                }
                                if (!page) {
                                        page = alloc_page(gfp);
                                        pg_size = PAGE_SIZE;
                                }
                                if (!page)
                                        goto wait_for_memory;
                                off = 0;
                        }
copy:
                        if (copy > pg_size - off)
                                copy = pg_size - off;
                        if (is_tls_tx(csk))
                                copy = min_t(int, copy, csk->tlshws.txleft);

                        err = chtls_skb_copy_to_page_nocache(sk, &msg->msg_iter,
                                                             skb, page,
                                                             off, copy);
                        if (unlikely(err)) {
                                if (!TCP_PAGE(sk)) {
                                        TCP_PAGE(sk) = page;
                                        TCP_OFF(sk) = 0;
                                }
                                goto do_fault;
                        }
                        /* Update the skb. */
                        if (merge) {
                                skb_shinfo(skb)->frags[i - 1].size += copy;
                        } else {
                                skb_fill_page_desc(skb, i, page, off, copy);
                                if (off + copy < pg_size) {
                                        /* space left keep page */
                                        get_page(page);
                                        TCP_PAGE(sk) = page;
                                } else {
                                        TCP_PAGE(sk) = NULL;
                                }
                        }
                        TCP_OFF(sk) = off + copy;
                }
                if (unlikely(skb->len == mss))
                        tx_skb_finalize(skb);
                tp->write_seq += copy;
                copied += copy;
                size -= copy;

                if (is_tls_tx(csk))
                        csk->tlshws.txleft -= copy;

                if (corked(tp, flags) &&
                    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
                        ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;

                if (size == 0)
                        goto out;

                if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)
                        push_frames_if_head(sk);
                continue;
wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                err = csk_wait_memory(cdev, sk, &timeo);
                if (err)
                        goto do_error;
        }
out:
        csk_reset_flag(csk, CSK_TX_MORE_DATA);
        if (copied)
                chtls_tcp_push(sk, flags);
done:
        release_sock(sk);
        return copied + hdrlen;
do_fault:
        if (!skb->len) {
                __skb_unlink(skb, &csk->txq);
                sk->sk_wmem_queued -= skb->truesize;
                __kfree_skb(skb);
        }
do_error:
        if (copied)
                goto out;
out_err:
        if (csk_conn_inline(csk))
                csk_reset_flag(csk, CSK_TX_MORE_DATA);
        copied = sk_stream_error(sk, flags, err);
        goto done;
}

int chtls_sendpage(struct sock *sk, struct page *page,
                   int offset, size_t size, int flags)
{
        struct chtls_sock *csk;
        struct chtls_dev *cdev;
        int mss, err, copied;
        struct tcp_sock *tp;
        long timeo;

        tp = tcp_sk(sk);
        copied = 0;
        csk = rcu_dereference_sk_user_data(sk);
        cdev = csk->cdev;
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        err = sk_stream_wait_connect(sk, &timeo);
        if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
            err != 0)
                goto out_err;

        mss = csk->mss;
        csk_set_flag(csk, CSK_TX_MORE_DATA);

        while (size > 0) {
                struct sk_buff *skb = skb_peek_tail(&csk->txq);
                int copy, i;

                if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
                    (copy = mss - skb->len) <= 0) {
new_buf:
                        if (!csk_mem_free(cdev, sk))
                                goto wait_for_sndbuf;

                        if (is_tls_tx(csk)) {
                                skb = get_record_skb(sk,
                                                     select_size(sk, size,
                                                                 flags,
                                                                 TX_TLSHDR_LEN),
                                                     true);
                        } else {
                                skb = get_tx_skb(sk, 0);
                        }
                        if (!skb)
                                goto wait_for_memory;
                        copy = mss;
                }
                if (copy > size)
                        copy = size;

                i = skb_shinfo(skb)->nr_frags;
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_shinfo(skb)->frags[i - 1].size += copy;
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
                } else {
                        tx_skb_finalize(skb);
                        push_frames_if_head(sk);
                        goto new_buf;
                }

                skb->len += copy;
                if (skb->len == mss)
                        tx_skb_finalize(skb);
                skb->data_len += copy;
                skb->truesize += copy;
                sk->sk_wmem_queued += copy;
                tp->write_seq += copy;
                copied += copy;
                offset += copy;
                size -= copy;

                if (corked(tp, flags) &&
                    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
                        ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;

                if (!size)
                        break;

                if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
                        push_frames_if_head(sk);
                continue;
wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                err = csk_wait_memory(cdev, sk, &timeo);
                if (err)
                        goto do_error;
        }
out:
        csk_reset_flag(csk, CSK_TX_MORE_DATA);
        if (copied)
                chtls_tcp_push(sk, flags);
done:
        release_sock(sk);
        return copied;
do_error:
        if (copied)
                goto out;

out_err:
        if (csk_conn_inline(csk))
                csk_reset_flag(csk, CSK_TX_MORE_DATA);
        copied = sk_stream_error(sk, flags, err);
        goto done;
}
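
/*
 * Recompute the advertised receive window: take the larger of the
 * current window and the full socket receive buffer, clamp it between
 * MIN_RCV_WND and MAX_RCV_WND, and widen the window (never shrink it)
 * if the buffer has grown since the last advertisement.
 */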
static void chtls_select_window(struct sock *sk)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int wnd = tp->rcv_wnd;

        wnd = max_t(unsigned int, wnd, tcp_full_space(sk));
        wnd = max_t(unsigned int, MIN_RCV_WND, wnd);

        if (wnd > MAX_RCV_WND)
                wnd = MAX_RCV_WND;

        /*
         * Check if we need to grow the receive window in response to an
         * increase in the socket's receive buffer size.  Some applications
         * increase the buffer size dynamically and rely on the window to
         * grow accordingly.
         */
        if (wnd > tp->rcv_wnd) {
                tp->rcv_wup -= wnd - tp->rcv_wnd;
                tp->rcv_wnd = wnd;
                /* Mark the receive window as updated */
                csk_reset_flag(csk, CSK_UPDATE_RCV_WND);
        }
}

/*
 * Send RX credits through an RX_DATA_ACK CPL message.  We are permitted
 * to return without sending the message in case we cannot allocate
 * an sk_buff.  Returns the number of credits sent.
 */
static u32 send_rx_credits(struct chtls_sock *csk, u32 credits)
{
        struct cpl_rx_data_ack *req;
        struct sk_buff *skb;

        skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
        if (!skb)
                return 0;
        __skb_put(skb, sizeof(*req));
        req = (struct cpl_rx_data_ack *)skb->head;

        set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                                    csk->tid));
        req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) |
                                       RX_FORCE_ACK_F);
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return credits;
}

#define CREDIT_RETURN_STATE (TCPF_ESTABLISHED | \
                             TCPF_FIN_WAIT1 | \
                             TCPF_FIN_WAIT2)

/*
 * Called after some received data has been read.  It returns RX credits
 * to the HW for the amount of data processed.
 */
static void chtls_cleanup_rbuf(struct sock *sk, int copied)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct tcp_sock *tp;
        int must_send;
        u32 credits;
        u32 thres;

        thres = 15 * 1024;

        if (!sk_in_state(sk, CREDIT_RETURN_STATE))
                return;

        chtls_select_window(sk);
        tp = tcp_sk(sk);
        credits = tp->copied_seq - tp->rcv_wup;
        if (unlikely(!credits))
                return;

        /*
         * For coalescing to work effectively ensure the receive window has
         * at least 16KB left.
         */
        must_send = credits + 16384 >= tp->rcv_wnd;

        if (must_send || credits >= thres)
                tp->rcv_wup += send_rx_credits(csk, credits);
}
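
/*
 * recvmsg() path for connections with inline TLS RX.  Records the
 * hardware has already processed sit on sk_receive_queue;
 * hws->copied_seq tracks how far into the current skb we have copied,
 * and once an skb is fully consumed tp->copied_seq is advanced by
 * skb->len for skbs carrying a TLS header and by hws->rcvpld otherwise,
 * before the skb is freed and credits are eventually returned through
 * chtls_cleanup_rbuf().
 */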
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                            int nonblock, int flags, int *addr_len)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct net_device *dev = csk->egress_dev;
        struct chtls_hws *hws = &csk->tlshws;
        struct tcp_sock *tp = tcp_sk(sk);
        struct adapter *adap;
        unsigned long avail;
        int buffers_freed;
        int copied = 0;
        int request;
        int target;
        long timeo;

        adap = netdev2adap(dev);
        buffers_freed = 0;

        timeo = sock_rcvtimeo(sk, nonblock);
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
        request = len;

        if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
                chtls_cleanup_rbuf(sk, copied);

        do {
                struct sk_buff *skb;
                u32 offset = 0;

                if (unlikely(tp->urg_data &&
                             tp->urg_seq == tp->copied_seq)) {
                        if (copied)
                                break;
                        if (signal_pending(current)) {
                                copied = timeo ? sock_intr_errno(timeo) :
                                        -EAGAIN;
                                break;
                        }
                }
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb)
                        goto found_ok_skb;
                if (csk->wr_credits &&
                    skb_queue_len(&csk->txq) &&
                    chtls_push_frames(csk, csk->wr_credits ==
                                      csk->wr_max_credits))
                        sk->sk_write_space(sk);

                if (copied >= target && !sk->sk_backlog.tail)
                        break;

                if (copied) {
                        if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
                            (sk->sk_shutdown & RCV_SHUTDOWN) ||
                            signal_pending(current))
                                break;

                        if (!timeo)
                                break;
                } else {
                        if (sock_flag(sk, SOCK_DONE))
                                break;
                        if (sk->sk_err) {
                                copied = sock_error(sk);
                                break;
                        }
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;
                        if (sk->sk_state == TCP_CLOSE) {
                                copied = -ENOTCONN;
                                break;
                        }
                        if (!timeo) {
                                copied = -EAGAIN;
                                break;
                        }
                        if (signal_pending(current)) {
                                copied = sock_intr_errno(timeo);
                                break;
                        }
                }
                if (sk->sk_backlog.tail) {
                        release_sock(sk);
                        lock_sock(sk);
                        chtls_cleanup_rbuf(sk, copied);
                        continue;
                }

                if (copied >= target)
                        break;
                chtls_cleanup_rbuf(sk, copied);
                sk_wait_data(sk, &timeo, NULL);
                continue;
found_ok_skb:
                if (!skb->len) {
                        skb_dst_set(skb, NULL);
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        kfree_skb(skb);

                        if (!copied && !timeo) {
                                copied = -EAGAIN;
                                break;
                        }

                        if (copied < target) {
                                release_sock(sk);
                                lock_sock(sk);
                                continue;
                        }
                        break;
                }
                offset = hws->copied_seq;
                avail = skb->len - offset;
                if (len < avail)
                        avail = len;

                if (unlikely(tp->urg_data)) {
                        u32 urg_offset = tp->urg_seq - tp->copied_seq;

                        if (urg_offset < avail) {
                                if (urg_offset) {
                                        avail = urg_offset;
                                } else if (!sock_flag(sk, SOCK_URGINLINE)) {
                                        /* First byte is urgent, skip */
                                        tp->copied_seq++;
                                        offset++;
                                        avail--;
                                        if (!avail)
                                                goto skip_copy;
                                }
                        }
                }
                if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
                        if (!copied) {
                                copied = -EFAULT;
                                break;
                        }
                }
                copied += avail;
                len -= avail;
                hws->copied_seq += avail;
skip_copy:
                if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
                        tp->urg_data = 0;

                if ((avail + offset) >= skb->len) {
                        if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
                                tp->copied_seq += skb->len;
                                hws->rcvpld = skb->hdr_len;
                        } else {
                                tp->copied_seq += hws->rcvpld;
                        }
                        chtls_free_skb(sk, skb);
                        buffers_freed++;
                        hws->copied_seq = 0;
                        if (copied >= target &&
                            !skb_peek(&sk->sk_receive_queue))
                                break;
                }
        } while (len > 0);

        if (buffers_freed)
                chtls_cleanup_rbuf(sk, copied);
        release_sock(sk);
        return copied;
}

/*
 * Peek at data in a socket's receive buffer.
 */
static int peekmsg(struct sock *sk, struct msghdr *msg,
                   size_t len, int nonblock, int flags)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 peek_seq, offset;
        struct sk_buff *skb;
        int copied = 0;
        size_t avail;           /* amount of available data in current skb */
        long timeo;

        lock_sock(sk);
        timeo = sock_rcvtimeo(sk, nonblock);
        peek_seq = tp->copied_seq;

        do {
                if (unlikely(tp->urg_data && tp->urg_seq == peek_seq)) {
                        if (copied)
                                break;
                        if (signal_pending(current)) {
                                copied = timeo ? sock_intr_errno(timeo) :
                                        -EAGAIN;
                                break;
                        }
                }

                skb_queue_walk(&sk->sk_receive_queue, skb) {
                        offset = peek_seq - ULP_SKB_CB(skb)->seq;
                        if (offset < skb->len)
                                goto found_ok_skb;
                }

                /* empty receive queue */
                if (copied)
                        break;
                if (sock_flag(sk, SOCK_DONE))
                        break;
                if (sk->sk_err) {
                        copied = sock_error(sk);
                        break;
                }
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        break;
                if (sk->sk_state == TCP_CLOSE) {
                        copied = -ENOTCONN;
                        break;
                }
                if (!timeo) {
                        copied = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        copied = sock_intr_errno(timeo);
                        break;
                }

                if (sk->sk_backlog.tail) {
                        /* Do not sleep, just process backlog. */
                        release_sock(sk);
                        lock_sock(sk);
                } else {
                        sk_wait_data(sk, &timeo, NULL);
                }

                if (unlikely(peek_seq != tp->copied_seq)) {
                        if (net_ratelimit())
                                pr_info("TCP(%s:%d), race in MSG_PEEK.\n",
                                        current->comm, current->pid);
                        peek_seq = tp->copied_seq;
                }
                continue;
found_ok_skb:
                avail = skb->len - offset;
                if (len < avail)
                        avail = len;
                /*
                 * Do we have urgent data here?  We need to skip over the
                 * urgent byte.
                 */
                if (unlikely(tp->urg_data)) {
                        u32 urg_offset = tp->urg_seq - peek_seq;

                        if (urg_offset < avail) {
                                /*
                                 * The amount of data we are preparing to copy
                                 * contains urgent data.
                                 */
                                if (!urg_offset) { /* First byte is urgent */
                                        if (!sock_flag(sk, SOCK_URGINLINE)) {
                                                peek_seq++;
                                                offset++;
                                                avail--;
                                        }
                                        if (!avail)
                                                continue;
                                } else {
                                        /* stop short of the urgent data */
                                        avail = urg_offset;
                                }
                        }
                }

                /*
                 * If MSG_TRUNC is specified the data is discarded.
                 */
                if (likely(!(flags & MSG_TRUNC)))
                        if (skb_copy_datagram_msg(skb, offset, msg, len)) {
                                if (!copied) {
                                        copied = -EFAULT;
                                        break;
                                }
                        }
                peek_seq += avail;
                copied += avail;
                len -= avail;
        } while (len > 0);

        release_sock(sk);
        return copied;
}
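
/*
 * Top-level recvmsg() for offloaded sockets.  Out-of-band reads fall
 * back to the stock tcp_prot.recvmsg(), MSG_PEEK is handled by
 * peekmsg(), and connections with an RX key installed take the
 * TLS-aware chtls_pt_recvmsg() path; everything else is a conventional
 * copy loop over sk_receive_queue driven by tp->copied_seq.
 */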
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                  int nonblock, int flags, int *addr_len)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct chtls_sock *csk;
        struct chtls_hws *hws;
        unsigned long avail;    /* amount of available data in current skb */
        int buffers_freed;
        int copied = 0;
        int request;
        long timeo;
        int target;             /* Read at least this many bytes */

        buffers_freed = 0;

        if (unlikely(flags & MSG_OOB))
                return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
                                        addr_len);

        if (unlikely(flags & MSG_PEEK))
                return peekmsg(sk, msg, len, nonblock, flags);

        if (sk_can_busy_loop(sk) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            sk->sk_state == TCP_ESTABLISHED)
                sk_busy_loop(sk, nonblock);

        lock_sock(sk);
        csk = rcu_dereference_sk_user_data(sk);
        hws = &csk->tlshws;

        if (is_tls_rx(csk))
                return chtls_pt_recvmsg(sk, msg, len, nonblock,
                                        flags, addr_len);

        timeo = sock_rcvtimeo(sk, nonblock);
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
        request = len;

        if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
                chtls_cleanup_rbuf(sk, copied);

        do {
                struct sk_buff *skb;
                u32 offset;

                if (unlikely(tp->urg_data && tp->urg_seq == tp->copied_seq)) {
                        if (copied)
                                break;
                        if (signal_pending(current)) {
                                copied = timeo ? sock_intr_errno(timeo) :
                                        -EAGAIN;
                                break;
                        }
                }

                skb = skb_peek(&sk->sk_receive_queue);
                if (skb)
                        goto found_ok_skb;

                if (csk->wr_credits &&
                    skb_queue_len(&csk->txq) &&
                    chtls_push_frames(csk, csk->wr_credits ==
                                      csk->wr_max_credits))
                        sk->sk_write_space(sk);

                if (copied >= target && !sk->sk_backlog.tail)
                        break;

                if (copied) {
                        if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
                            (sk->sk_shutdown & RCV_SHUTDOWN) ||
                            signal_pending(current))
                                break;
                } else {
                        if (sock_flag(sk, SOCK_DONE))
                                break;
                        if (sk->sk_err) {
                                copied = sock_error(sk);
                                break;
                        }
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;
                        if (sk->sk_state == TCP_CLOSE) {
                                copied = -ENOTCONN;
                                break;
                        }
                        if (!timeo) {
                                copied = -EAGAIN;
                                break;
                        }
                        if (signal_pending(current)) {
                                copied = sock_intr_errno(timeo);
                                break;
                        }
                }

                if (sk->sk_backlog.tail) {
                        release_sock(sk);
                        lock_sock(sk);
                        chtls_cleanup_rbuf(sk, copied);
                        continue;
                }

                if (copied >= target)
                        break;
                chtls_cleanup_rbuf(sk, copied);
                sk_wait_data(sk, &timeo, NULL);
                continue;

found_ok_skb:
                if (!skb->len) {
                        chtls_kfree_skb(sk, skb);
                        if (!copied && !timeo) {
                                copied = -EAGAIN;
                                break;
                        }

                        if (copied < target)
                                continue;

                        break;
                }

                offset = tp->copied_seq - ULP_SKB_CB(skb)->seq;
                avail = skb->len - offset;
                if (len < avail)
                        avail = len;

                if (unlikely(tp->urg_data)) {
                        u32 urg_offset = tp->urg_seq - tp->copied_seq;

                        if (urg_offset < avail) {
                                if (urg_offset) {
                                        avail = urg_offset;
                                } else if (!sock_flag(sk, SOCK_URGINLINE)) {
                                        tp->copied_seq++;
                                        offset++;
                                        avail--;
                                        if (!avail)
                                                goto skip_copy;
                                }
                        }
                }

                if (likely(!(flags & MSG_TRUNC))) {
                        if (skb_copy_datagram_msg(skb, offset,
                                                  msg, avail)) {
                                if (!copied) {
                                        copied = -EFAULT;
                                        break;
                                }
                        }
                }

                tp->copied_seq += avail;
                copied += avail;
                len -= avail;

skip_copy:
                if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
                        tp->urg_data = 0;

                if (avail + offset >= skb->len) {
                        if (likely(skb))
                                chtls_free_skb(sk, skb);
                        buffers_freed++;

                        if (copied >= target &&
                            !skb_peek(&sk->sk_receive_queue))
                                break;
                }
        } while (len > 0);

        if (buffers_freed)
                chtls_cleanup_rbuf(sk, copied);

        release_sock(sk);
        return copied;
}