virtio_transport_common.c

/*
 * common code for virtio vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <uapi/linux/vsockmon.h>

#include <net/sock.h>
#include <net/af_vsock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vsock_virtio_transport_common.h>

/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)

static const struct virtio_transport *virtio_transport_get_ops(void)
{
        const struct vsock_transport *t = vsock_core_get_transport();

        return container_of(t, struct virtio_transport, transport);
}

static struct virtio_vsock_pkt *
virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
                           size_t len,
                           u32 src_cid,
                           u32 src_port,
                           u32 dst_cid,
                           u32 dst_port)
{
        struct virtio_vsock_pkt *pkt;
        int err;

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
                return NULL;

        pkt->hdr.type = cpu_to_le16(info->type);
        pkt->hdr.op = cpu_to_le16(info->op);
        pkt->hdr.src_cid = cpu_to_le64(src_cid);
        pkt->hdr.dst_cid = cpu_to_le64(dst_cid);
        pkt->hdr.src_port = cpu_to_le32(src_port);
        pkt->hdr.dst_port = cpu_to_le32(dst_port);
        pkt->hdr.flags = cpu_to_le32(info->flags);
        pkt->len = len;
        pkt->hdr.len = cpu_to_le32(len);
        pkt->reply = info->reply;
        pkt->vsk = info->vsk;

        if (info->msg && len > 0) {
                pkt->buf = kmalloc(len, GFP_KERNEL);
                if (!pkt->buf)
                        goto out_pkt;
                err = memcpy_from_msg(pkt->buf, info->msg, len);
                if (err)
                        goto out;
        }

        trace_virtio_transport_alloc_pkt(src_cid, src_port,
                                         dst_cid, dst_port,
                                         len,
                                         info->type,
                                         info->op,
                                         info->flags);

        return pkt;

out:
        kfree(pkt->buf);
out_pkt:
        kfree(pkt);
        return NULL;
}

/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
        struct virtio_vsock_pkt *pkt = opaque;
        struct af_vsockmon_hdr *hdr;
        struct sk_buff *skb;

        skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + pkt->len,
                        GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, sizeof(*hdr));

        /* pkt->hdr is little-endian so no need to byteswap here */
        hdr->src_cid = pkt->hdr.src_cid;
        hdr->src_port = pkt->hdr.src_port;
        hdr->dst_cid = pkt->hdr.dst_cid;
        hdr->dst_port = pkt->hdr.dst_port;

        hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
        hdr->len = cpu_to_le16(sizeof(pkt->hdr));
        memset(hdr->reserved, 0, sizeof(hdr->reserved));

        switch (le16_to_cpu(pkt->hdr.op)) {
        case VIRTIO_VSOCK_OP_REQUEST:
        case VIRTIO_VSOCK_OP_RESPONSE:
                hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
                break;
        case VIRTIO_VSOCK_OP_RST:
        case VIRTIO_VSOCK_OP_SHUTDOWN:
                hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
                break;
        case VIRTIO_VSOCK_OP_RW:
                hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
                break;
        case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
        case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
                hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
                break;
        default:
                hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
                break;
        }

        skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));

        if (pkt->len) {
                skb_put_data(skb, pkt->buf, pkt->len);
        }

        return skb;
}

void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
{
        vsock_deliver_tap(virtio_transport_build_skb, pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);

static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
                                          struct virtio_vsock_pkt_info *info)
{
        u32 src_cid, src_port, dst_cid, dst_port;
        struct virtio_vsock_sock *vvs;
        struct virtio_vsock_pkt *pkt;
        u32 pkt_len = info->pkt_len;

        src_cid = vm_sockets_get_local_cid();
        src_port = vsk->local_addr.svm_port;
        if (!info->remote_cid) {
                dst_cid = vsk->remote_addr.svm_cid;
                dst_port = vsk->remote_addr.svm_port;
        } else {
                dst_cid = info->remote_cid;
                dst_port = info->remote_port;
        }

        vvs = vsk->trans;

        /* we can send less than pkt_len bytes */
        if (pkt_len > VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE)
                pkt_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;

        /* virtio_transport_get_credit might return less than pkt_len credit */
        pkt_len = virtio_transport_get_credit(vvs, pkt_len);

        /* Do not send zero length OP_RW pkt */
        if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
                return pkt_len;

        pkt = virtio_transport_alloc_pkt(info, pkt_len,
                                         src_cid, src_port,
                                         dst_cid, dst_port);
        if (!pkt) {
                virtio_transport_put_credit(vvs, pkt_len);
                return -ENOMEM;
        }

        virtio_transport_inc_tx_pkt(vvs, pkt);

        return virtio_transport_get_ops()->send_pkt(pkt);
}

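/* Credit-based flow control: each side advertises its receive buffer size
 * (buf_alloc) and the running total of bytes it has consumed from its receive
 * queue (fwd_cnt) in every packet header.  The sender tracks the total bytes
 * it has transmitted in tx_cnt, so the free space at the peer is
 * peer_buf_alloc - (tx_cnt - peer_fwd_cnt).  The helpers below maintain these
 * counters; virtio_transport_get_credit() never grants more credit than that
 * difference allows, and virtio_transport_inc_tx_pkt() stamps the local
 * counters into each outgoing header.
 */
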
static void virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
                                        struct virtio_vsock_pkt *pkt)
{
        vvs->rx_bytes += pkt->len;
}

static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
                                        struct virtio_vsock_pkt *pkt)
{
        vvs->rx_bytes -= pkt->len;
        vvs->fwd_cnt += pkt->len;
}

void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
{
        spin_lock_bh(&vvs->tx_lock);
        pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
        pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
        spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);

u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
        u32 ret;

        spin_lock_bh(&vvs->tx_lock);
        ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
        if (ret > credit)
                ret = credit;
        vvs->tx_cnt += ret;
        spin_unlock_bh(&vvs->tx_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_credit);

void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
        spin_lock_bh(&vvs->tx_lock);
        vvs->tx_cnt -= credit;
        spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);

static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
                                               int type,
                                               struct virtio_vsock_hdr *hdr)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
                .type = type,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}

static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
                                   struct msghdr *msg,
                                   size_t len)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        struct virtio_vsock_pkt *pkt;
        size_t bytes, total = 0;
        int err = -EFAULT;

        spin_lock_bh(&vvs->rx_lock);
        while (total < len && !list_empty(&vvs->rx_queue)) {
                pkt = list_first_entry(&vvs->rx_queue,
                                       struct virtio_vsock_pkt, list);

                bytes = len - total;
                if (bytes > pkt->len - pkt->off)
                        bytes = pkt->len - pkt->off;

                /* sk_lock is held by caller so no one else can dequeue.
                 * Unlock rx_lock since memcpy_to_msg() may sleep.
                 */
                spin_unlock_bh(&vvs->rx_lock);

                err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
                if (err)
                        goto out;

                spin_lock_bh(&vvs->rx_lock);

                total += bytes;
                pkt->off += bytes;
                if (pkt->off == pkt->len) {
                        virtio_transport_dec_rx_pkt(vvs, pkt);
                        list_del(&pkt->list);
                        virtio_transport_free_pkt(pkt);
                }
        }
        spin_unlock_bh(&vvs->rx_lock);

        /* Send a credit pkt to peer */
        virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
                                            NULL);

        return total;

out:
        if (total)
                err = total;
        return err;
}

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
                                struct msghdr *msg,
                                size_t len, int flags)
{
        if (flags & MSG_PEEK)
                return -EOPNOTSUPP;

        return virtio_transport_stream_do_dequeue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);

int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
                               struct msghdr *msg,
                               size_t len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);

s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        s64 bytes;

        spin_lock_bh(&vvs->rx_lock);
        bytes = vvs->rx_bytes;
        spin_unlock_bh(&vvs->rx_lock);

        return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);

static s64 virtio_transport_has_space(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        s64 bytes;

        bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
        if (bytes < 0)
                bytes = 0;

        return bytes;
}

s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;
        s64 bytes;

        spin_lock_bh(&vvs->tx_lock);
        bytes = virtio_transport_has_space(vsk);
        spin_unlock_bh(&vvs->tx_lock);

        return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
                                    struct vsock_sock *psk)
{
        struct virtio_vsock_sock *vvs;

        vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
        if (!vvs)
                return -ENOMEM;

        vsk->trans = vvs;
        vvs->vsk = vsk;
        if (psk) {
                struct virtio_vsock_sock *ptrans = psk->trans;

                vvs->buf_size = ptrans->buf_size;
                vvs->buf_size_min = ptrans->buf_size_min;
                vvs->buf_size_max = ptrans->buf_size_max;
                vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
        } else {
                vvs->buf_size = VIRTIO_VSOCK_DEFAULT_BUF_SIZE;
                vvs->buf_size_min = VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE;
                vvs->buf_size_max = VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE;
        }

        vvs->buf_alloc = vvs->buf_size;

        spin_lock_init(&vvs->rx_lock);
        spin_lock_init(&vvs->tx_lock);
        INIT_LIST_HEAD(&vvs->rx_queue);

        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);

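/* The getters/setters below back the per-socket buffer size knobs exposed by
 * the AF_VSOCK core.  Values are clamped to VIRTIO_VSOCK_MAX_BUF_SIZE and the
 * min/max bounds are kept consistent with buf_size; buf_size in turn feeds
 * buf_alloc, the receive buffer space advertised to the peer for flow
 * control.
 */
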
u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        return vvs->buf_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_buffer_size);

u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        return vvs->buf_size_min;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_min_buffer_size);

u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        return vvs->buf_size_max;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_max_buffer_size);

void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
                val = VIRTIO_VSOCK_MAX_BUF_SIZE;
        if (val < vvs->buf_size_min)
                vvs->buf_size_min = val;
        if (val > vvs->buf_size_max)
                vvs->buf_size_max = val;
        vvs->buf_size = val;
        vvs->buf_alloc = val;
}
EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);

void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
                val = VIRTIO_VSOCK_MAX_BUF_SIZE;
        if (val > vvs->buf_size)
                vvs->buf_size = val;
        vvs->buf_size_min = val;
}
EXPORT_SYMBOL_GPL(virtio_transport_set_min_buffer_size);

void virtio_transport_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
                val = VIRTIO_VSOCK_MAX_BUF_SIZE;
        if (val < vvs->buf_size)
                vvs->buf_size = val;
        vvs->buf_size_max = val;
}
EXPORT_SYMBOL_GPL(virtio_transport_set_max_buffer_size);

int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
                                size_t target,
                                bool *data_ready_now)
{
        if (vsock_stream_has_data(vsk))
                *data_ready_now = true;
        else
                *data_ready_now = false;

        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);

int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
                                 size_t target,
                                 bool *space_avail_now)
{
        s64 free_space;

        free_space = vsock_stream_has_space(vsk);
        if (free_space > 0)
                *space_avail_now = true;
        else if (free_space == 0)
                *space_avail_now = false;

        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
        size_t target, struct vsock_transport_recv_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);

int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
        size_t target, struct vsock_transport_recv_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);

int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
        size_t target, struct vsock_transport_recv_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);

int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
        size_t target, ssize_t copied, bool data_read,
        struct vsock_transport_recv_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);

int virtio_transport_notify_send_init(struct vsock_sock *vsk,
        struct vsock_transport_send_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);

int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
        struct vsock_transport_send_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);

int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
        struct vsock_transport_send_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);

int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
        ssize_t written, struct vsock_transport_send_notify_data *data)
{
        return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        return vvs->buf_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);

bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
        return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);

bool virtio_transport_stream_allow(u32 cid, u32 port)
{
        return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);

int virtio_transport_dgram_bind(struct vsock_sock *vsk,
                                struct sockaddr_vm *addr)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);

bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
        return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);

int virtio_transport_connect(struct vsock_sock *vsk)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_REQUEST,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_SHUTDOWN,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
                .flags = (mode & RCV_SHUTDOWN ?
                          VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
                         (mode & SEND_SHUTDOWN ?
                          VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_shutdown);

int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
                               struct sockaddr_vm *remote_addr,
                               struct msghdr *msg,
                               size_t dgram_len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
                                struct msghdr *msg,
                                size_t len)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RW,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
                .msg = msg,
                .pkt_len = len,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);

void virtio_transport_destruct(struct vsock_sock *vsk)
{
        struct virtio_vsock_sock *vvs = vsk->trans;

        kfree(vvs);
}
EXPORT_SYMBOL_GPL(virtio_transport_destruct);

static int virtio_transport_reset(struct vsock_sock *vsk,
                                  struct virtio_vsock_pkt *pkt)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RST,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
                .reply = !!pkt,
                .vsk = vsk,
        };

        /* Send RST only if the original pkt is not a RST pkt */
        if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
                return 0;

        return virtio_transport_send_pkt_info(vsk, &info);
}

/* Normally packets are associated with a socket.  There may be no socket if an
 * attempt was made to connect to a socket that does not exist.
 */
static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RST,
                .type = le16_to_cpu(pkt->hdr.type),
                .reply = true,
        };

        /* Send RST only if the original pkt is not a RST pkt */
        if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
                return 0;

        pkt = virtio_transport_alloc_pkt(&info, 0,
                                         le64_to_cpu(pkt->hdr.dst_cid),
                                         le32_to_cpu(pkt->hdr.dst_port),
                                         le64_to_cpu(pkt->hdr.src_cid),
                                         le32_to_cpu(pkt->hdr.src_port));
        if (!pkt)
                return -ENOMEM;

        return virtio_transport_get_ops()->send_pkt(pkt);
}

static void virtio_transport_wait_close(struct sock *sk, long timeout)
{
        if (timeout) {
                DEFINE_WAIT_FUNC(wait, woken_wake_function);

                add_wait_queue(sk_sleep(sk), &wait);

                do {
                        if (sk_wait_event(sk, &timeout,
                                          sock_flag(sk, SOCK_DONE), &wait))
                                break;
                } while (!signal_pending(current) && timeout);

                remove_wait_queue(sk_sleep(sk), &wait);
        }
}

static void virtio_transport_do_close(struct vsock_sock *vsk,
                                      bool cancel_timeout)
{
        struct sock *sk = sk_vsock(vsk);

        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown = SHUTDOWN_MASK;
        if (vsock_stream_has_data(vsk) <= 0)
                sk->sk_state = SS_DISCONNECTING;
        sk->sk_state_change(sk);

        if (vsk->close_work_scheduled &&
            (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
                vsk->close_work_scheduled = false;

                vsock_remove_sock(vsk);

                /* Release refcnt obtained when we scheduled the timeout */
                sock_put(sk);
        }
}

static void virtio_transport_close_timeout(struct work_struct *work)
{
        struct vsock_sock *vsk =
                container_of(work, struct vsock_sock, close_work.work);
        struct sock *sk = sk_vsock(vsk);

        sock_hold(sk);
        lock_sock(sk);

        if (!sock_flag(sk, SOCK_DONE)) {
                (void)virtio_transport_reset(vsk, NULL);

                virtio_transport_do_close(vsk, false);
        }

        vsk->close_work_scheduled = false;

        release_sock(sk);
        sock_put(sk);
}

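/* Orderly close: virtio_transport_close() sends SHUTDOWN to the peer,
 * optionally lingers per SO_LINGER, and, if the peer has not yet acknowledged
 * with its own shutdown/RST, arms close_work so the connection is forcibly
 * reset after VSOCK_CLOSE_TIMEOUT rather than lingering forever.
 */
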
/* User context, vsk->sk is locked */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
        struct sock *sk = &vsk->sk;

        if (!(sk->sk_state == SS_CONNECTED ||
              sk->sk_state == SS_DISCONNECTING))
                return true;

        /* Already received SHUTDOWN from peer, reply with RST */
        if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
                (void)virtio_transport_reset(vsk, NULL);
                return true;
        }

        if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
                (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);

        if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
                virtio_transport_wait_close(sk, sk->sk_lingertime);

        if (sock_flag(sk, SOCK_DONE)) {
                return true;
        }

        sock_hold(sk);
        INIT_DELAYED_WORK(&vsk->close_work,
                          virtio_transport_close_timeout);
        vsk->close_work_scheduled = true;
        schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
        return false;
}

void virtio_transport_release(struct vsock_sock *vsk)
{
        struct sock *sk = &vsk->sk;
        bool remove_sock = true;

        lock_sock(sk);
        if (sk->sk_type == SOCK_STREAM)
                remove_sock = virtio_transport_close(vsk);
        release_sock(sk);

        if (remove_sock)
                vsock_remove_sock(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_release);

static int
virtio_transport_recv_connecting(struct sock *sk,
                                 struct virtio_vsock_pkt *pkt)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        int err;
        int skerr;

        switch (le16_to_cpu(pkt->hdr.op)) {
        case VIRTIO_VSOCK_OP_RESPONSE:
                sk->sk_state = SS_CONNECTED;
                sk->sk_socket->state = SS_CONNECTED;
                vsock_insert_connected(vsk);
                sk->sk_state_change(sk);
                break;
        case VIRTIO_VSOCK_OP_INVALID:
                break;
        case VIRTIO_VSOCK_OP_RST:
                skerr = ECONNRESET;
                err = 0;
                goto destroy;
        default:
                skerr = EPROTO;
                err = -EINVAL;
                goto destroy;
        }
        return 0;

destroy:
        virtio_transport_reset(vsk, pkt);
        sk->sk_state = SS_UNCONNECTED;
        sk->sk_err = skerr;
        sk->sk_error_report(sk);
        return err;
}

static int
virtio_transport_recv_connected(struct sock *sk,
                                struct virtio_vsock_pkt *pkt)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        struct virtio_vsock_sock *vvs = vsk->trans;
        int err = 0;

        switch (le16_to_cpu(pkt->hdr.op)) {
        case VIRTIO_VSOCK_OP_RW:
                pkt->len = le32_to_cpu(pkt->hdr.len);
                pkt->off = 0;

                spin_lock_bh(&vvs->rx_lock);
                virtio_transport_inc_rx_pkt(vvs, pkt);
                list_add_tail(&pkt->list, &vvs->rx_queue);
                spin_unlock_bh(&vvs->rx_lock);

                sk->sk_data_ready(sk);
                return err;
        case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
                sk->sk_write_space(sk);
                break;
        case VIRTIO_VSOCK_OP_SHUTDOWN:
                if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
                        vsk->peer_shutdown |= RCV_SHUTDOWN;
                if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
                        vsk->peer_shutdown |= SEND_SHUTDOWN;
                if (vsk->peer_shutdown == SHUTDOWN_MASK &&
                    vsock_stream_has_data(vsk) <= 0)
                        sk->sk_state = SS_DISCONNECTING;
                if (le32_to_cpu(pkt->hdr.flags))
                        sk->sk_state_change(sk);
                break;
        case VIRTIO_VSOCK_OP_RST:
                virtio_transport_do_close(vsk, true);
                break;
        default:
                err = -EINVAL;
                break;
        }

        virtio_transport_free_pkt(pkt);
        return err;
}

static void
virtio_transport_recv_disconnecting(struct sock *sk,
                                    struct virtio_vsock_pkt *pkt)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
                virtio_transport_do_close(vsk, true);
}

static int
virtio_transport_send_response(struct vsock_sock *vsk,
                               struct virtio_vsock_pkt *pkt)
{
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_RESPONSE,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
                .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
                .remote_port = le32_to_cpu(pkt->hdr.src_port),
                .reply = true,
                .vsk = vsk,
        };

        return virtio_transport_send_pkt_info(vsk, &info);
}

/* Handle server socket */
static int
virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        struct vsock_sock *vchild;
        struct sock *child;

        if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
                virtio_transport_reset(vsk, pkt);
                return -EINVAL;
        }

        if (sk_acceptq_is_full(sk)) {
                virtio_transport_reset(vsk, pkt);
                return -ENOMEM;
        }

        child = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
                               sk->sk_type, 0);
        if (!child) {
                virtio_transport_reset(vsk, pkt);
                return -ENOMEM;
        }

        sk->sk_ack_backlog++;

        lock_sock_nested(child, SINGLE_DEPTH_NESTING);

        child->sk_state = SS_CONNECTED;

        vchild = vsock_sk(child);
        vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
                        le32_to_cpu(pkt->hdr.dst_port));
        vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
                        le32_to_cpu(pkt->hdr.src_port));

        vsock_insert_connected(vchild);
        vsock_enqueue_accept(sk, child);
        virtio_transport_send_response(vchild, pkt);

        release_sock(child);

        sk->sk_data_ready(sk);
        return 0;
}

static bool virtio_transport_space_update(struct sock *sk,
                                          struct virtio_vsock_pkt *pkt)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        struct virtio_vsock_sock *vvs = vsk->trans;
        bool space_available;

        /* buf_alloc and fwd_cnt is always included in the hdr */
        spin_lock_bh(&vvs->tx_lock);
        vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
        vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
        space_available = virtio_transport_has_space(vsk);
        spin_unlock_bh(&vvs->tx_lock);
        return space_available;
}

/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
 * lock.
 */
void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
{
        struct sockaddr_vm src, dst;
        struct vsock_sock *vsk;
        struct sock *sk;
        bool space_available;

        vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
                        le32_to_cpu(pkt->hdr.src_port));
        vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
                        le32_to_cpu(pkt->hdr.dst_port));

        trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
                                        dst.svm_cid, dst.svm_port,
                                        le32_to_cpu(pkt->hdr.len),
                                        le16_to_cpu(pkt->hdr.type),
                                        le16_to_cpu(pkt->hdr.op),
                                        le32_to_cpu(pkt->hdr.flags),
                                        le32_to_cpu(pkt->hdr.buf_alloc),
                                        le32_to_cpu(pkt->hdr.fwd_cnt));

        if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
                (void)virtio_transport_reset_no_sock(pkt);
                goto free_pkt;
        }

        /* The socket must be in connected or bound table
         * otherwise send reset back
         */
        sk = vsock_find_connected_socket(&src, &dst);
        if (!sk) {
                sk = vsock_find_bound_socket(&dst);
                if (!sk) {
                        (void)virtio_transport_reset_no_sock(pkt);
                        goto free_pkt;
                }
        }

        vsk = vsock_sk(sk);

        space_available = virtio_transport_space_update(sk, pkt);

        lock_sock(sk);

        /* Update CID in case it has changed after a transport reset event */
        vsk->local_addr.svm_cid = dst.svm_cid;

        if (space_available)
                sk->sk_write_space(sk);

        switch (sk->sk_state) {
        case VSOCK_SS_LISTEN:
                virtio_transport_recv_listen(sk, pkt);
                virtio_transport_free_pkt(pkt);
                break;
        case SS_CONNECTING:
                virtio_transport_recv_connecting(sk, pkt);
                virtio_transport_free_pkt(pkt);
                break;
        case SS_CONNECTED:
                virtio_transport_recv_connected(sk, pkt);
                break;
        case SS_DISCONNECTING:
                virtio_transport_recv_disconnecting(sk, pkt);
                virtio_transport_free_pkt(pkt);
                break;
        default:
                virtio_transport_free_pkt(pkt);
                break;
        }
        release_sock(sk);

        /* Release refcnt obtained when we fetched this socket out of the
         * bound or connected list.
         */
        sock_put(sk);
        return;

free_pkt:
        virtio_transport_free_pkt(pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);

void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
{
        kfree(pkt->buf);
        kfree(pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("common code for virtio vsock");