smc_tx.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_ism.h"
#include "smc_tx.h"

#define SMC_TX_WORK_DELAY	HZ
#define SMC_TX_CORK_DELAY	(HZ >> 2)	/* 250 ms */

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
        struct socket *sock = sk->sk_socket;
        struct smc_sock *smc = smc_sk(sk);
        struct socket_wq *wq;

        /* similar to sk_stream_write_space */
        if (atomic_read(&smc->conn.sndbuf_space) && sock) {
                clear_bit(SOCK_NOSPACE, &sock->flags);
                rcu_read_lock();
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_poll(&wq->wait,
                                                   EPOLLOUT | EPOLLWRNORM |
                                                   EPOLLWRBAND);
                if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
                        sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
                rcu_read_unlock();
        }
}

/* Wakeup sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
        if (smc->sk.sk_socket &&
            test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
                smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space available
 * or urgent Byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct smc_connection *conn = &smc->conn;
        struct sock *sk = &smc->sk;
        bool noblock;
        long timeo;
        int rc = 0;

        /* similar to sk_stream_wait_memory */
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        noblock = timeo ? false : true;
        add_wait_queue(sk_sleep(sk), &wait);
        while (1) {
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                if (sk->sk_err ||
                    (sk->sk_shutdown & SEND_SHUTDOWN) ||
                    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
                        rc = -EPIPE;
                        break;
                }
                if (smc_cdc_rxed_any_close(conn)) {
                        rc = -ECONNRESET;
                        break;
                }
                if (!timeo) {
                        if (noblock)
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        rc = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        rc = sock_intr_errno(timeo);
                        break;
                }
                sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
                        break; /* at least 1 byte of free & no urgent data */
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                sk_wait_event(sk, &timeo,
                              sk->sk_err ||
                              (sk->sk_shutdown & SEND_SHUTDOWN) ||
                              smc_cdc_rxed_any_close(conn) ||
                              (atomic_read(&conn->sndbuf_space) &&
                               !conn->urg_tx_pend),
                              &wait);
        }
        remove_wait_queue(sk_sleep(sk), &wait);
        return rc;
}

static bool smc_tx_is_corked(struct smc_sock *smc)
{
        struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

        return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}

/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
        size_t copylen, send_done = 0, send_remaining = len;
        size_t chunk_len, chunk_off, chunk_len_sum;
        struct smc_connection *conn = &smc->conn;
        union smc_host_cursor prep;
        struct sock *sk = &smc->sk;
        char *sndbuf_base;
        int tx_cnt_prep;
        int writespace;
        int rc, chunk;

        /* This should be in poll */
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
                rc = -EPIPE;
                goto out_err;
        }

        while (msg_data_left(msg)) {
                if (sk->sk_state == SMC_INIT)
                        return -ENOTCONN;
                if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
                    (smc->sk.sk_err == ECONNABORTED) ||
                    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
                        return -EPIPE;
                if (smc_cdc_rxed_any_close(conn))
                        return send_done ?: -ECONNRESET;
                if (msg->msg_flags & MSG_OOB)
                        conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;
                if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
                        rc = smc_tx_wait(smc, msg->msg_flags);
                        if (rc) {
                                if (send_done)
                                        return send_done;
                                goto out_err;
                        }
                        continue;
                }

                /* initialize variables for 1st iteration of subsequent loop */
                /* could be just 1 byte, even after smc_tx_wait above */
                writespace = atomic_read(&conn->sndbuf_space);
                /* not more than what user space asked for */
                copylen = min_t(size_t, send_remaining, writespace);
                /* determine start of sndbuf */
                sndbuf_base = conn->sndbuf_desc->cpu_addr;
                smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
                tx_cnt_prep = prep.count;
                /* determine chunks where to write into sndbuf */
                /* either unwrapped case, or 1st chunk of wrapped case */
                chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
                                  tx_cnt_prep);
                chunk_len_sum = chunk_len;
                chunk_off = tx_cnt_prep;
                smc_sndbuf_sync_sg_for_cpu(conn);
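                /* copy the payload in at most two chunks: if the copy would
                 * run past the end of the send ring buffer, the first chunk
                 * fills up to the buffer end and the second chunk wraps
                 * around to offset 0
                 */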
                for (chunk = 0; chunk < 2; chunk++) {
                        rc = memcpy_from_msg(sndbuf_base + chunk_off,
                                             msg, chunk_len);
                        if (rc) {
                                smc_sndbuf_sync_sg_for_device(conn);
                                if (send_done)
                                        return send_done;
                                goto out_err;
                        }
                        send_done += chunk_len;
                        send_remaining -= chunk_len;

                        if (chunk_len_sum == copylen)
                                break; /* either on 1st or 2nd iteration */
                        /* prepare next (== 2nd) iteration */
                        chunk_len = copylen - chunk_len; /* remainder */
                        chunk_len_sum += chunk_len;
                        chunk_off = 0; /* modulo offset in send ring buffer */
                }
                smc_sndbuf_sync_sg_for_device(conn);
                /* update cursors */
                smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
                smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
                /* increased in send tasklet smc_cdc_tx_handler() */
                smp_mb__before_atomic();
                atomic_sub(copylen, &conn->sndbuf_space);
                /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
                smp_mb__after_atomic();
                /* since we just produced more new data into sndbuf,
                 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
                 */
                if ((msg->msg_flags & MSG_OOB) && !send_remaining)
                        conn->urg_tx_pend = true;
                if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
                    (atomic_read(&conn->sndbuf_space) >
                     (conn->sndbuf_desc->len >> 1)))
                        /* for a corked socket defer the RDMA writes if there
                         * is still sufficient sndbuf_space available
                         */
                        schedule_delayed_work(&conn->tx_work,
                                              SMC_TX_CORK_DELAY);
                else
                        smc_tx_sndbuf_nonempty(conn);
        } /* while (msg_data_left(msg)) */

        return send_done;

out_err:
        rc = sk_stream_error(sk, msg->msg_flags, rc);
        /* make sure we wake any epoll edge trigger waiter */
        if (unlikely(rc == -EAGAIN))
                sk->sk_write_space(sk);
        return rc;
}

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
                      u32 offset, int signal)
{
        struct smc_ism_position pos;
        int rc;

        memset(&pos, 0, sizeof(pos));
        pos.token = conn->peer_token;
        pos.index = conn->peer_rmbe_idx;
        pos.offset = conn->tx_off + offset;
        pos.signal = signal;
        rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
        if (rc)
                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
        return rc;
}

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
                             int num_sges, struct ib_sge sges[])
{
        struct smc_link_group *lgr = conn->lgr;
        struct ib_rdma_wr rdma_wr;
        struct smc_link *link;
        int rc;

        memset(&rdma_wr, 0, sizeof(rdma_wr));
        link = &lgr->lnk[SMC_SINGLE_LINK];
        rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
        rdma_wr.wr.sg_list = sges;
        rdma_wr.wr.num_sge = num_sges;
        rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
        rdma_wr.remote_addr =
                lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
                /* RMBE within RMB */
                conn->tx_off +
                /* offset within RMBE */
                peer_rmbe_offset;
        rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
        rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL);
        if (rc) {
                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
                smc_lgr_terminate(lgr);
        }
        return rc;
}

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
                                          union smc_host_cursor *prod,
                                          union smc_host_cursor *sent,
                                          size_t len)
{
        smc_curs_add(conn->peer_rmbe_size, prod, len);
        /* increased in recv tasklet smc_cdc_msg_rcv() */
        smp_mb__before_atomic();
        /* data in flight reduces usable snd_wnd */
        atomic_sub(len, &conn->peer_rmbe_space);
        /* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
        smp_mb__after_atomic();
        smc_curs_add(conn->sndbuf_desc->len, sent, len);
}

/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
                               size_t src_off, size_t src_len,
                               size_t dst_off, size_t dst_len)
{
        dma_addr_t dma_addr =
                sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
        struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
        int src_len_sum = src_len, dst_len_sum = dst_len;
        struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
        int sent_count = src_off;
        int srcchunk, dstchunk;
        int num_sges;
        int rc;
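        /* both the local sndbuf and the peer RMBE are ring buffers, so source
         * and destination may wrap independently; the nested loops below post
         * at most two RDMA writes (one per destination chunk), each gathering
         * at most two source chunks via the SGE list
         */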
        for (dstchunk = 0; dstchunk < 2; dstchunk++) {
                num_sges = 0;
                for (srcchunk = 0; srcchunk < 2; srcchunk++) {
                        sges[srcchunk].addr = dma_addr + src_off;
                        sges[srcchunk].length = src_len;
                        sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
                        num_sges++;

                        src_off += src_len;
                        if (src_off >= conn->sndbuf_desc->len)
                                src_off -= conn->sndbuf_desc->len;
                                        /* modulo in send ring */
                        if (src_len_sum == dst_len)
                                break; /* either on 1st or 2nd iteration */
                        /* prepare next (== 2nd) iteration */
                        src_len = dst_len - src_len; /* remainder */
                        src_len_sum += src_len;
                }
                rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
                if (rc)
                        return rc;
                if (dst_len_sum == len)
                        break; /* either on 1st or 2nd iteration */
                /* prepare next (== 2nd) iteration */
                dst_off = 0; /* modulo offset in RMBE ring buffer */
                dst_len = len - dst_len; /* remainder */
                dst_len_sum += dst_len;
                src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
                                sent_count);
                src_len_sum = src_len;
        }
        return 0;
}

/* SMC-D helper for smc_tx_rdma_writes() */
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
                               size_t src_off, size_t src_len,
                               size_t dst_off, size_t dst_len)
{
        int src_len_sum = src_len, dst_len_sum = dst_len;
        int srcchunk, dstchunk;
        int rc;
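        /* same two-level chunking as in smcr_tx_rdma_writes(), but each
         * source chunk is moved directly with an ISM write instead of being
         * gathered into an RDMA work request
         */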
        for (dstchunk = 0; dstchunk < 2; dstchunk++) {
                for (srcchunk = 0; srcchunk < 2; srcchunk++) {
                        void *data = conn->sndbuf_desc->cpu_addr + src_off;

                        rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
                                               sizeof(struct smcd_cdc_msg), 0);
                        if (rc)
                                return rc;
                        dst_off += src_len;
                        src_off += src_len;
                        if (src_off >= conn->sndbuf_desc->len)
                                src_off -= conn->sndbuf_desc->len;
                                        /* modulo in send ring */
                        if (src_len_sum == dst_len)
                                break; /* either on 1st or 2nd iteration */
                        /* prepare next (== 2nd) iteration */
                        src_len = dst_len - src_len; /* remainder */
                        src_len_sum += src_len;
                }
                if (dst_len_sum == len)
                        break; /* either on 1st or 2nd iteration */
                /* prepare next (== 2nd) iteration */
                dst_off = 0; /* modulo offset in RMBE ring buffer */
                dst_len = len - dst_len; /* remainder */
                dst_len_sum += dst_len;
                src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
                src_len_sum = src_len;
        }
        return 0;
}

/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
        size_t len, src_len, dst_off, dst_len; /* current chunk values */
        union smc_host_cursor sent, prep, prod, cons;
        struct smc_cdc_producer_flags *pflags;
        int to_send, rmbespace;
        int rc;

        /* source: sndbuf */
        smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
        smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
        /* cf. wmem_alloc - (snd_max - snd_una) */
        to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
        if (to_send <= 0)
                return 0;

        /* destination: RMBE */
        /* cf. snd_wnd */
        rmbespace = atomic_read(&conn->peer_rmbe_space);
        if (rmbespace <= 0)
                return 0;
        smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
        smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

        /* if usable snd_wnd closes ask peer to advertise once it opens again */
        pflags = &conn->local_tx_ctrl.prod_flags;
        pflags->write_blocked = (to_send >= rmbespace);
        /* cf. usable snd_wnd */
        len = min(to_send, rmbespace);

        /* initialize variables for first iteration of subsequent nested loop */
        dst_off = prod.count;
        if (prod.wrap == cons.wrap) {
                /* the filled destination area is unwrapped,
                 * hence the available free destination space is wrapped
                 * and we need 2 destination chunks of sum len; start with 1st
                 * which is limited by what's available in sndbuf
                 */
                dst_len = min_t(size_t,
                                conn->peer_rmbe_size - prod.count, len);
        } else {
                /* the filled destination area is wrapped,
                 * hence the available free destination space is unwrapped
                 * and we need a single destination chunk of entire len
                 */
                dst_len = len;
        }
        /* dst_len determines the maximum src_len */
        if (sent.count + dst_len <= conn->sndbuf_desc->len) {
                /* unwrapped src case: single chunk of entire dst_len */
                src_len = dst_len;
        } else {
                /* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
                src_len = conn->sndbuf_desc->len - sent.count;
        }

        if (conn->lgr->is_smcd)
                rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
                                         dst_off, dst_len);
        else
                rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
                                         dst_off, dst_len);
        if (rc)
                return rc;

        if (conn->urg_tx_pend && len == to_send)
                pflags->urg_data_present = 1;
        smc_tx_advance_cursors(conn, &prod, &sent, len);
        /* update connection's cursors with advanced local cursors */
        smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
                                                        /* dst: peer RMBE */
        smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */

        return 0;
}

/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
        struct smc_cdc_producer_flags *pflags;
        struct smc_cdc_tx_pend *pend;
        struct smc_wr_buf *wr_buf;
        int rc;
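        /* reserve a CDC send slot first; if none is free (-EBUSY) and the
         * connection is still healthy, retry later from the tx worker
         */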
        spin_lock_bh(&conn->send_lock);
        rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
        if (rc < 0) {
                if (rc == -EBUSY) {
                        struct smc_sock *smc =
                                container_of(conn, struct smc_sock, conn);

                        if (smc->sk.sk_err == ECONNABORTED) {
                                rc = sock_error(&smc->sk);
                                goto out_unlock;
                        }
                        rc = 0;
                        if (conn->alert_token_local) /* connection healthy */
                                mod_delayed_work(system_wq, &conn->tx_work,
                                                 SMC_TX_WORK_DELAY);
                }
                goto out_unlock;
        }

        if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
                rc = smc_tx_rdma_writes(conn);
                if (rc) {
                        smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
                                           (struct smc_wr_tx_pend_priv *)pend);
                        goto out_unlock;
                }
        }

        rc = smc_cdc_msg_send(conn, wr_buf, pend);
        pflags = &conn->local_tx_ctrl.prod_flags;
        if (!rc && pflags->urg_data_present) {
                pflags->urg_data_pending = 0;
                pflags->urg_data_present = 0;
        }

out_unlock:
        spin_unlock_bh(&conn->send_lock);
        return rc;
}

static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
        struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
        int rc = 0;

        spin_lock_bh(&conn->send_lock);
        if (!pflags->urg_data_present)
                rc = smc_tx_rdma_writes(conn);
        if (!rc)
                rc = smcd_cdc_msg_send(conn);

        if (!rc && pflags->urg_data_present) {
                pflags->urg_data_pending = 0;
                pflags->urg_data_present = 0;
        }

        spin_unlock_bh(&conn->send_lock);
        return rc;
}

int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
        int rc;

        if (conn->lgr->is_smcd)
                rc = smcd_tx_sndbuf_nonempty(conn);
        else
                rc = smcr_tx_sndbuf_nonempty(conn);

        return rc;
}

/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit
 */
void smc_tx_work(struct work_struct *work)
{
        struct smc_connection *conn = container_of(to_delayed_work(work),
                                                   struct smc_connection,
                                                   tx_work);
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        int rc;

        lock_sock(&smc->sk);
        if (smc->sk.sk_err ||
            !conn->alert_token_local ||
            conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
                goto out;
        rc = smc_tx_sndbuf_nonempty(conn);
        if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
            !atomic_read(&conn->bytes_to_rcv))
                conn->local_rx_ctrl.prod_flags.write_blocked = 0;

out:
        release_sock(&smc->sk);
}

void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
        union smc_host_cursor cfed, cons, prod;
        int sender_free = conn->rmb_desc->len;
        int to_confirm;

        smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
        smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
        to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
        if (to_confirm > conn->rmbe_update_limit) {
                smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
                sender_free = conn->rmb_desc->len -
                              smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
        }
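        /* send a consumer cursor update if the peer requested one, if the
         * caller forces it, or if enough bytes have been consumed since the
         * last confirmation while the peer's remaining window is low or the
         * peer signalled write_blocked
         */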
        if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
            force ||
            ((to_confirm > conn->rmbe_update_limit) &&
             ((sender_free <= (conn->rmb_desc->len / 2)) ||
              conn->local_rx_ctrl.prod_flags.write_blocked))) {
                if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
                    conn->alert_token_local) { /* connection healthy */
                        schedule_delayed_work(&conn->tx_work,
                                              SMC_TX_WORK_DELAY);
                        return;
                }
                smc_curs_copy(&conn->rx_curs_confirmed,
                              &conn->local_tx_ctrl.cons, conn);
                conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
        }
        if (conn->local_rx_ctrl.prod_flags.write_blocked &&
            !atomic_read(&conn->bytes_to_rcv))
                conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
        smc->sk.sk_write_space = smc_tx_write_space;
}