smc_clc.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Shared Memory Communications over RDMA (SMC-R) and RoCE
  4. *
  5. * CLC (connection layer control) handshake over initial TCP socket to
  6. * prepare for RDMA traffic
  7. *
  8. * Copyright IBM Corp. 2016, 2018
  9. *
  10. * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
  11. */
  12. #include <linux/in.h>
  13. #include <linux/inetdevice.h>
  14. #include <linux/if_ether.h>
  15. #include <linux/sched/signal.h>
  16. #include <net/addrconf.h>
  17. #include <net/sock.h>
  18. #include <net/tcp.h>
  19. #include "smc.h"
  20. #include "smc_core.h"
  21. #include "smc_clc.h"
  22. #include "smc_ib.h"
/* eye catcher "SMCR" EBCDIC for CLC messages;
 * appears at both the head and the tail of every CLC message
 */
static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
/* check if received message has a correct header length and contains valid
 * heading and trailing eyecatchers
 */
static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct smc_clc_msg_accept_confirm *clc;
	struct smc_clc_msg_proposal *pclc;
	struct smc_clc_msg_decline *dclc;
	struct smc_clc_msg_trail *trl;

	/* every CLC message starts with the EBCDIC "SMCR" eyecatcher */
	if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
		return false;
	switch (clcm->type) {
	case SMC_CLC_PROPOSAL:
		pclc = (struct smc_clc_msg_proposal *)clcm;
		pclc_prfx = smc_clc_proposal_get_prefix(pclc);
		/* expected total length: fixed part + declared ip area
		 * offset + prefix descriptor + ipv6 prefix array + trailer.
		 * NOTE(review): ipv6_prefixes_cnt is read from the peer's
		 * buffer before the advertised length is validated — assumes
		 * the caller already received at least that many bytes;
		 * verify against smc_clc_wait_msg()'s receive logic.
		 */
		if (ntohs(pclc->hdr.length) !=
			sizeof(*pclc) + ntohs(pclc->iparea_offset) +
			sizeof(*pclc_prfx) +
			pclc_prfx->ipv6_prefixes_cnt *
				sizeof(struct smc_clc_ipv6_prefix) +
			sizeof(*trl))
			return false;
		/* trailer occupies the last sizeof(*trl) bytes of the msg */
		trl = (struct smc_clc_msg_trail *)
			((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
		break;
	case SMC_CLC_ACCEPT:
	case SMC_CLC_CONFIRM:
		/* accept and confirm share one fixed-size message layout */
		clc = (struct smc_clc_msg_accept_confirm *)clcm;
		if (ntohs(clc->hdr.length) != sizeof(*clc))
			return false;
		trl = &clc->trl;
		break;
	case SMC_CLC_DECLINE:
		dclc = (struct smc_clc_msg_decline *)clcm;
		if (ntohs(dclc->hdr.length) != sizeof(*dclc))
			return false;
		trl = &dclc->trl;
		break;
	default:
		return false;
	}
	/* trailing eyecatcher must match as well */
	if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
		return false;
	return true;
}
/* find ipv4 addr on device and get the prefix len, fill CLC proposal msg;
 * called under rcu_read_lock() by smc_clc_prfx_set().
 * Returns 0 on success, -ENODEV if the device has no inet config,
 * -ENOENT if no interface address covers @ipv4.
 */
static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
				 struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dst->dev);

	if (!in_dev)
		return -ENODEV;
	for_ifa(in_dev) {
		/* pick the interface address whose subnet contains ipv4 */
		if (!inet_ifa_match(ipv4, ifa))
			continue;
		prop->prefix_len = inet_mask_len(ifa->ifa_mask);
		prop->outgoing_subnet = ifa->ifa_address & ifa->ifa_mask;
		/* prop->ipv6_prefixes_cnt = 0; already done by memset before */
		return 0;
	} endfor_ifa(in_dev);
	return -ENOENT;
}
/* fill CLC proposal msg with ipv6 prefixes from device;
 * called under rcu_read_lock() by smc_clc_prfx_set().
 * NOTE(review): addr_list is walked without taking idev->lock — assumes the
 * caller's RCU read side is sufficient here; confirm against other readers.
 */
static int smc_clc_prfx_set6_rcu(struct dst_entry *dst,
				 struct smc_clc_msg_proposal_prefix *prop,
				 struct smc_clc_ipv6_prefix *ipv6_prfx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dst->dev);
	struct inet6_ifaddr *ifa;
	int cnt = 0;

	if (!in6_dev)
		return -ENODEV;
	/* use a maximum of 8 IPv6 prefixes from device */
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		/* link-local addresses are not proposed to the peer */
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		ipv6_addr_prefix(&ipv6_prfx[cnt].prefix,
				 &ifa->addr, ifa->prefix_len);
		ipv6_prfx[cnt].prefix_len = ifa->prefix_len;
		cnt++;
		if (cnt == SMC_CLC_MAX_V6_PREFIX)
			break;
	}
	prop->ipv6_prefixes_cnt = cnt;
	if (cnt)
		return 0;
#endif
	/* no usable ipv6 prefix found (or IPV6 not configured) */
	return -ENOENT;
}
  116. /* retrieve and set prefixes in CLC proposal msg */
  117. static int smc_clc_prfx_set(struct socket *clcsock,
  118. struct smc_clc_msg_proposal_prefix *prop,
  119. struct smc_clc_ipv6_prefix *ipv6_prfx)
  120. {
  121. struct dst_entry *dst = sk_dst_get(clcsock->sk);
  122. struct sockaddr_storage addrs;
  123. struct sockaddr_in6 *addr6;
  124. struct sockaddr_in *addr;
  125. int rc = -ENOENT;
  126. memset(prop, 0, sizeof(*prop));
  127. if (!dst) {
  128. rc = -ENOTCONN;
  129. goto out;
  130. }
  131. if (!dst->dev) {
  132. rc = -ENODEV;
  133. goto out_rel;
  134. }
  135. /* get address to which the internal TCP socket is bound */
  136. kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
  137. /* analyze IP specific data of net_device belonging to TCP socket */
  138. addr6 = (struct sockaddr_in6 *)&addrs;
  139. rcu_read_lock();
  140. if (addrs.ss_family == PF_INET) {
  141. /* IPv4 */
  142. addr = (struct sockaddr_in *)&addrs;
  143. rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop);
  144. } else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) {
  145. /* mapped IPv4 address - peer is IPv4 only */
  146. rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3],
  147. prop);
  148. } else {
  149. /* IPv6 */
  150. rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx);
  151. }
  152. rcu_read_unlock();
  153. out_rel:
  154. dst_release(dst);
  155. out:
  156. return rc;
  157. }
  158. /* match ipv4 addrs of dev against addr in CLC proposal */
  159. static int smc_clc_prfx_match4_rcu(struct net_device *dev,
  160. struct smc_clc_msg_proposal_prefix *prop)
  161. {
  162. struct in_device *in_dev = __in_dev_get_rcu(dev);
  163. if (!in_dev)
  164. return -ENODEV;
  165. for_ifa(in_dev) {
  166. if (prop->prefix_len == inet_mask_len(ifa->ifa_mask) &&
  167. inet_ifa_match(prop->outgoing_subnet, ifa))
  168. return 0;
  169. } endfor_ifa(in_dev);
  170. return -ENOENT;
  171. }
  172. /* match ipv6 addrs of dev against addrs in CLC proposal */
  173. static int smc_clc_prfx_match6_rcu(struct net_device *dev,
  174. struct smc_clc_msg_proposal_prefix *prop)
  175. {
  176. #if IS_ENABLED(CONFIG_IPV6)
  177. struct inet6_dev *in6_dev = __in6_dev_get(dev);
  178. struct smc_clc_ipv6_prefix *ipv6_prfx;
  179. struct inet6_ifaddr *ifa;
  180. int i, max;
  181. if (!in6_dev)
  182. return -ENODEV;
  183. /* ipv6 prefix list starts behind smc_clc_msg_proposal_prefix */
  184. ipv6_prfx = (struct smc_clc_ipv6_prefix *)((u8 *)prop + sizeof(*prop));
  185. max = min_t(u8, prop->ipv6_prefixes_cnt, SMC_CLC_MAX_V6_PREFIX);
  186. list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
  187. if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
  188. continue;
  189. for (i = 0; i < max; i++) {
  190. if (ifa->prefix_len == ipv6_prfx[i].prefix_len &&
  191. ipv6_prefix_equal(&ifa->addr, &ipv6_prfx[i].prefix,
  192. ifa->prefix_len))
  193. return 0;
  194. }
  195. }
  196. #endif
  197. return -ENOENT;
  198. }
  199. /* check if proposed prefixes match one of our device prefixes */
  200. int smc_clc_prfx_match(struct socket *clcsock,
  201. struct smc_clc_msg_proposal_prefix *prop)
  202. {
  203. struct dst_entry *dst = sk_dst_get(clcsock->sk);
  204. int rc;
  205. if (!dst) {
  206. rc = -ENOTCONN;
  207. goto out;
  208. }
  209. if (!dst->dev) {
  210. rc = -ENODEV;
  211. goto out_rel;
  212. }
  213. rcu_read_lock();
  214. if (!prop->ipv6_prefixes_cnt)
  215. rc = smc_clc_prfx_match4_rcu(dst->dev, prop);
  216. else
  217. rc = smc_clc_prfx_match6_rcu(dst->dev, prop);
  218. rcu_read_unlock();
  219. out_rel:
  220. dst_release(dst);
  221. out:
  222. return rc;
  223. }
  224. /* Wait for data on the tcp-socket, analyze received data
  225. * Returns:
  226. * 0 if success and it was not a decline that we received.
  227. * SMC_CLC_DECL_REPLY if decline received for fallback w/o another decl send.
  228. * clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise.
  229. */
  230. int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
  231. u8 expected_type)
  232. {
  233. struct sock *clc_sk = smc->clcsock->sk;
  234. struct smc_clc_msg_hdr *clcm = buf;
  235. struct msghdr msg = {NULL, 0};
  236. int reason_code = 0;
  237. struct kvec vec = {buf, buflen};
  238. int len, datlen;
  239. int krflags;
  240. /* peek the first few bytes to determine length of data to receive
  241. * so we don't consume any subsequent CLC message or payload data
  242. * in the TCP byte stream
  243. */
  244. /*
  245. * Caller must make sure that buflen is no less than
  246. * sizeof(struct smc_clc_msg_hdr)
  247. */
  248. krflags = MSG_PEEK | MSG_WAITALL;
  249. smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
  250. iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1,
  251. sizeof(struct smc_clc_msg_hdr));
  252. len = sock_recvmsg(smc->clcsock, &msg, krflags);
  253. if (signal_pending(current)) {
  254. reason_code = -EINTR;
  255. clc_sk->sk_err = EINTR;
  256. smc->sk.sk_err = EINTR;
  257. goto out;
  258. }
  259. if (clc_sk->sk_err) {
  260. reason_code = -clc_sk->sk_err;
  261. smc->sk.sk_err = clc_sk->sk_err;
  262. goto out;
  263. }
  264. if (!len) { /* peer has performed orderly shutdown */
  265. smc->sk.sk_err = ECONNRESET;
  266. reason_code = -ECONNRESET;
  267. goto out;
  268. }
  269. if (len < 0) {
  270. smc->sk.sk_err = -len;
  271. reason_code = len;
  272. goto out;
  273. }
  274. datlen = ntohs(clcm->length);
  275. if ((len < sizeof(struct smc_clc_msg_hdr)) ||
  276. (datlen > buflen) ||
  277. ((clcm->type != SMC_CLC_DECLINE) &&
  278. (clcm->type != expected_type))) {
  279. smc->sk.sk_err = EPROTO;
  280. reason_code = -EPROTO;
  281. goto out;
  282. }
  283. /* receive the complete CLC message */
  284. memset(&msg, 0, sizeof(struct msghdr));
  285. iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
  286. krflags = MSG_WAITALL;
  287. smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
  288. len = sock_recvmsg(smc->clcsock, &msg, krflags);
  289. if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
  290. smc->sk.sk_err = EPROTO;
  291. reason_code = -EPROTO;
  292. goto out;
  293. }
  294. if (clcm->type == SMC_CLC_DECLINE) {
  295. reason_code = SMC_CLC_DECL_REPLY;
  296. if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
  297. smc->conn.lgr->sync_err = 1;
  298. smc_lgr_terminate(smc->conn.lgr);
  299. }
  300. }
  301. out:
  302. return reason_code;
  303. }
  304. /* send CLC DECLINE message across internal TCP socket */
  305. int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
  306. {
  307. struct smc_clc_msg_decline dclc;
  308. struct msghdr msg;
  309. struct kvec vec;
  310. int len;
  311. memset(&dclc, 0, sizeof(dclc));
  312. memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
  313. dclc.hdr.type = SMC_CLC_DECLINE;
  314. dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
  315. dclc.hdr.version = SMC_CLC_V1;
  316. dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
  317. memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
  318. dclc.peer_diagnosis = htonl(peer_diag_info);
  319. memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
  320. memset(&msg, 0, sizeof(msg));
  321. vec.iov_base = &dclc;
  322. vec.iov_len = sizeof(struct smc_clc_msg_decline);
  323. len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
  324. sizeof(struct smc_clc_msg_decline));
  325. if (len < sizeof(struct smc_clc_msg_decline))
  326. smc->sk.sk_err = EPROTO;
  327. if (len < 0)
  328. smc->sk.sk_err = -len;
  329. return sock_error(&smc->sk);
  330. }
  331. /* send CLC PROPOSAL message across internal TCP socket */
  332. int smc_clc_send_proposal(struct smc_sock *smc,
  333. struct smc_ib_device *smcibdev,
  334. u8 ibport)
  335. {
  336. struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX];
  337. struct smc_clc_msg_proposal_prefix pclc_prfx;
  338. struct smc_clc_msg_proposal pclc;
  339. struct smc_clc_msg_trail trl;
  340. int len, i, plen, rc;
  341. int reason_code = 0;
  342. struct kvec vec[4];
  343. struct msghdr msg;
  344. /* retrieve ip prefixes for CLC proposal msg */
  345. rc = smc_clc_prfx_set(smc->clcsock, &pclc_prfx, ipv6_prfx);
  346. if (rc)
  347. return SMC_CLC_DECL_CNFERR; /* configuration error */
  348. /* send SMC Proposal CLC message */
  349. plen = sizeof(pclc) + sizeof(pclc_prfx) +
  350. (pclc_prfx.ipv6_prefixes_cnt * sizeof(ipv6_prfx[0])) +
  351. sizeof(trl);
  352. memset(&pclc, 0, sizeof(pclc));
  353. memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
  354. pclc.hdr.type = SMC_CLC_PROPOSAL;
  355. pclc.hdr.length = htons(plen);
  356. pclc.hdr.version = SMC_CLC_V1; /* SMC version */
  357. memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
  358. memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE);
  359. memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN);
  360. pclc.iparea_offset = htons(0);
  361. memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
  362. memset(&msg, 0, sizeof(msg));
  363. i = 0;
  364. vec[i].iov_base = &pclc;
  365. vec[i++].iov_len = sizeof(pclc);
  366. vec[i].iov_base = &pclc_prfx;
  367. vec[i++].iov_len = sizeof(pclc_prfx);
  368. if (pclc_prfx.ipv6_prefixes_cnt > 0) {
  369. vec[i].iov_base = &ipv6_prfx[0];
  370. vec[i++].iov_len = pclc_prfx.ipv6_prefixes_cnt *
  371. sizeof(ipv6_prfx[0]);
  372. }
  373. vec[i].iov_base = &trl;
  374. vec[i++].iov_len = sizeof(trl);
  375. /* due to the few bytes needed for clc-handshake this cannot block */
  376. len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
  377. if (len < sizeof(pclc)) {
  378. if (len >= 0) {
  379. reason_code = -ENETUNREACH;
  380. smc->sk.sk_err = -reason_code;
  381. } else {
  382. smc->sk.sk_err = smc->clcsock->sk->sk_err;
  383. reason_code = -smc->sk.sk_err;
  384. }
  385. }
  386. return reason_code;
  387. }
  388. /* send CLC CONFIRM message across internal TCP socket */
  389. int smc_clc_send_confirm(struct smc_sock *smc)
  390. {
  391. struct smc_connection *conn = &smc->conn;
  392. struct smc_clc_msg_accept_confirm cclc;
  393. struct smc_link *link;
  394. int reason_code = 0;
  395. struct msghdr msg;
  396. struct kvec vec;
  397. int len;
  398. link = &conn->lgr->lnk[SMC_SINGLE_LINK];
  399. /* send SMC Confirm CLC msg */
  400. memset(&cclc, 0, sizeof(cclc));
  401. memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
  402. cclc.hdr.type = SMC_CLC_CONFIRM;
  403. cclc.hdr.length = htons(sizeof(cclc));
  404. cclc.hdr.version = SMC_CLC_V1; /* SMC version */
  405. memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
  406. memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
  407. SMC_GID_SIZE);
  408. memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
  409. hton24(cclc.qpn, link->roce_qp->qp_num);
  410. cclc.rmb_rkey =
  411. htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
  412. cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
  413. cclc.rmbe_alert_token = htonl(conn->alert_token_local);
  414. cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
  415. cclc.rmbe_size = conn->rmbe_size_short;
  416. cclc.rmb_dma_addr = cpu_to_be64(
  417. (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
  418. hton24(cclc.psn, link->psn_initial);
  419. memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
  420. memset(&msg, 0, sizeof(msg));
  421. vec.iov_base = &cclc;
  422. vec.iov_len = sizeof(cclc);
  423. len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(cclc));
  424. if (len < sizeof(cclc)) {
  425. if (len >= 0) {
  426. reason_code = -ENETUNREACH;
  427. smc->sk.sk_err = -reason_code;
  428. } else {
  429. smc->sk.sk_err = smc->clcsock->sk->sk_err;
  430. reason_code = -smc->sk.sk_err;
  431. }
  432. }
  433. return reason_code;
  434. }
  435. /* send CLC ACCEPT message across internal TCP socket */
  436. int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
  437. {
  438. struct smc_connection *conn = &new_smc->conn;
  439. struct smc_clc_msg_accept_confirm aclc;
  440. struct smc_link *link;
  441. struct msghdr msg;
  442. struct kvec vec;
  443. int rc = 0;
  444. int len;
  445. link = &conn->lgr->lnk[SMC_SINGLE_LINK];
  446. memset(&aclc, 0, sizeof(aclc));
  447. memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
  448. aclc.hdr.type = SMC_CLC_ACCEPT;
  449. aclc.hdr.length = htons(sizeof(aclc));
  450. aclc.hdr.version = SMC_CLC_V1; /* SMC version */
  451. if (srv_first_contact)
  452. aclc.hdr.flag = 1;
  453. memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
  454. memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
  455. SMC_GID_SIZE);
  456. memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
  457. hton24(aclc.qpn, link->roce_qp->qp_num);
  458. aclc.rmb_rkey =
  459. htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
  460. aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */
  461. aclc.rmbe_alert_token = htonl(conn->alert_token_local);
  462. aclc.qp_mtu = link->path_mtu;
  463. aclc.rmbe_size = conn->rmbe_size_short,
  464. aclc.rmb_dma_addr = cpu_to_be64(
  465. (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
  466. hton24(aclc.psn, link->psn_initial);
  467. memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
  468. memset(&msg, 0, sizeof(msg));
  469. vec.iov_base = &aclc;
  470. vec.iov_len = sizeof(aclc);
  471. len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1, sizeof(aclc));
  472. if (len < sizeof(aclc)) {
  473. if (len >= 0)
  474. new_smc->sk.sk_err = EPROTO;
  475. else
  476. new_smc->sk.sk_err = new_smc->clcsock->sk->sk_err;
  477. rc = sock_error(&new_smc->sk);
  478. }
  479. return rc;
  480. }