vsock.c

/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <net/af_vsock.h>

#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID 2

enum {
        VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[2];

        /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
        struct list_head list;

        struct vhost_work send_pkt_work;
        spinlock_t send_pkt_list_lock;
        struct list_head send_pkt_list;	/* host->guest pending packets */

        atomic_t queued_replies;

        u32 guest_cid;
};
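
/* Virtqueue indices are named from the guest's point of view: VSOCK_VQ_RX
 * carries host->guest packets and VSOCK_VQ_TX carries guest->host packets.
 * queued_replies counts reply packets, generated while servicing the guest's
 * TX ring, that are still waiting on send_pkt_list; TX processing is paused
 * once it reaches the TX virtqueue size, so a guest that never drains its
 * RX ring cannot pin unbounded amounts of host memory.
 */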

static u32 vhost_transport_get_local_cid(void)
{
        return VHOST_VSOCK_DEFAULT_HOST_CID;
}

static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        list_for_each_entry(vsock, &vhost_vsock_list, list) {
                u32 other_cid = vsock->guest_cid;

                /* Skip instances that have no CID yet */
                if (other_cid == 0)
                        continue;

                if (other_cid == guest_cid)
                        return vsock;
        }

        return NULL;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        spin_lock_bh(&vhost_vsock_lock);
        vsock = __vhost_vsock_get(guest_cid);
        spin_unlock_bh(&vhost_vsock_lock);

        return vsock;
}
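
/* Deliver pending host->guest packets into the guest's RX virtqueue: pop a
 * packet from send_pkt_list, fetch a guest buffer, copy header and payload,
 * and mark the buffer used.  Guest notifications stay disabled while the
 * queue is being serviced; when the ring runs out of buffers they are
 * re-enabled and rechecked, so a buffer added concurrently by the guest is
 * never missed.
 */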

static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
{
        struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
        bool added = false;
        bool restart_tx = false;

        mutex_lock(&vq->mutex);

        if (!vq->private_data)
                goto out;

        /* Avoid further vmexits, we're already processing the virtqueue */
        vhost_disable_notify(&vsock->dev, vq);

        for (;;) {
                struct virtio_vsock_pkt *pkt;
                struct iov_iter iov_iter;
                unsigned out, in;
                size_t nbytes;
                size_t len;
                int head;

                spin_lock_bh(&vsock->send_pkt_list_lock);
                if (list_empty(&vsock->send_pkt_list)) {
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        vhost_enable_notify(&vsock->dev, vq);
                        break;
                }

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                spin_unlock_bh(&vsock->send_pkt_list_lock);

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        break;
                }

                if (head == vq->num) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);

                        /* We cannot finish yet if more buffers snuck in while
                         * re-enabling notify.
                         */
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                if (out) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Expected 0 output buffers, got %u\n", out);
                        break;
                }

                len = iov_length(&vq->iov[out], in);
                iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

                nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
                if (nbytes != sizeof(pkt->hdr)) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Faulted on copying pkt hdr\n");
                        break;
                }

                nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
                if (nbytes != pkt->len) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Faulted on copying pkt buf\n");
                        break;
                }

                vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
                added = true;

                if (pkt->reply) {
                        int val;

                        val = atomic_dec_return(&vsock->queued_replies);

                        /* Do we have resources to resume tx processing? */
                        if (val + 1 == tx_vq->num)
                                restart_tx = true;
                }

                virtio_transport_free_pkt(pkt);
        }
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);

        if (restart_tx)
                vhost_poll_queue(&tx_vq->poll);
}
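
/* Work item executed on the device's vhost worker thread.  It is queued by
 * vhost_transport_send_pkt() whenever a new host->guest packet is appended
 * to send_pkt_list.
 */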

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_vsock *vsock;

        vsock = container_of(work, struct vhost_vsock, send_pkt_work);
        vq = &vsock->vqs[VSOCK_VQ_RX];

        vhost_transport_do_send_pkt(vsock, vq);
}
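
/* Entry point used by the core virtio transport to transmit a packet to the
 * guest.  The packet is only queued here; the copy into guest memory happens
 * later on the worker thread, so the returned length means "accepted", not
 * "delivered".
 */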

static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
        struct vhost_vsock *vsock;
        int len = pkt->len;

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
                virtio_transport_free_pkt(pkt);
                return -ENODEV;
        }

        if (pkt->reply)
                atomic_inc(&vsock->queued_replies);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_add_tail(&pkt->list, &vsock->send_pkt_list);
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
        return len;
}

static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
        struct vhost_vsock *vsock;
        struct virtio_vsock_pkt *pkt, *n;
        int cnt = 0;
        LIST_HEAD(freeme);

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock)
                return -ENODEV;

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
                if (pkt->vsk != vsk)
                        continue;
                list_move(&pkt->list, &freeme);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        list_for_each_entry_safe(pkt, n, &freeme, list) {
                if (pkt->reply)
                        cnt++;
                list_del(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }
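
        /* If removing these replies just brought queued_replies back under
         * the tx_vq->num limit, TX processing had stalled and needs to be
         * kicked.  The check below detects exactly that crossing: e.g. with
         * tx_vq->num == 256, cnt == 3 and new_cnt == 254, the old count was
         * 257 (stalled) while the new count is below the limit again.
         */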
        if (cnt) {
                struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
                int new_cnt;

                new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
                if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
                        vhost_poll_queue(&tx_vq->poll);
        }

        return 0;
}
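
/* Read one guest->host packet out of a TX virtqueue descriptor chain.  The
 * guest supplies only readable (out) buffers here, laid out as a
 * virtio_vsock header optionally followed by the payload.
 */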

static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
                      unsigned int out, unsigned int in)
{
        struct virtio_vsock_pkt *pkt;
        struct iov_iter iov_iter;
        size_t nbytes;
        size_t len;

        if (in != 0) {
                vq_err(vq, "Expected 0 input buffers, got %u\n", in);
                return NULL;
        }

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
                return NULL;

        len = iov_length(vq->iov, out);
        iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

        nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
        if (nbytes != sizeof(pkt->hdr)) {
                vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
                       sizeof(pkt->hdr), nbytes);
                kfree(pkt);
                return NULL;
        }

        if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
                pkt->len = le32_to_cpu(pkt->hdr.len);

        /* No payload */
        if (!pkt->len)
                return pkt;

        /* The pkt is too big */
        if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
                kfree(pkt);
                return NULL;
        }

        pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
        if (!pkt->buf) {
                kfree(pkt);
                return NULL;
        }

        nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
        if (nbytes != pkt->len) {
                vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
                       pkt->len, nbytes);
                virtio_transport_free_pkt(pkt);
                return NULL;
        }

        return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
        int val;

        smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
        val = atomic_read(&vsock->queued_replies);

        return val < vq->num;
}
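
/* The guest kicked the TX virtqueue: pull out each guest->host packet,
 * verify that its source CID matches the CID assigned to this guest, and
 * hand it to the core virtio transport.  Processing stops early while
 * queued_replies is full and is restarted from vhost_transport_do_send_pkt()
 * once replies have drained.
 */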

static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);
        struct virtio_vsock_pkt *pkt;
        int head;
        unsigned int out, in;
        bool added = false;

        mutex_lock(&vq->mutex);

        if (!vq->private_data)
                goto out;

        vhost_disable_notify(&vsock->dev, vq);
        for (;;) {
                u32 len;

                if (!vhost_vsock_more_replies(vsock)) {
                        /* Stop tx until the device processes already
                         * pending replies.  Leave tx virtqueue
                         * callbacks disabled.
                         */
                        goto no_more_replies;
                }

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;

                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                pkt = vhost_vsock_alloc_pkt(vq, out, in);
                if (!pkt) {
                        vq_err(vq, "Faulted on pkt\n");
                        continue;
                }

                len = pkt->len;

                /* Only accept correctly addressed packets */
                if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
                        virtio_transport_recv_pkt(pkt);
                else
                        virtio_transport_free_pkt(pkt);

                vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
                added = true;
        }

no_more_replies:
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);
}
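
/* The guest kicked the RX virtqueue: fresh receive buffers are available,
 * so retry delivering any host->guest packets still on send_pkt_list.
 */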

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);

        vhost_transport_do_send_pkt(vsock, vq);
}
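
/* vq->private_data doubles as the "virtqueue enabled" flag: it points at the
 * owning vhost_vsock while the device is running and is NULL otherwise.  The
 * kick handlers above check it under vq->mutex before touching the ring.
 */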

static int vhost_vsock_start(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq;
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);

                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
                        goto err_vq;
                }

                if (!vq->private_data) {
                        vq->private_data = vsock;
                        ret = vhost_vq_init_access(vq);
                        if (ret)
                                goto err_vq;
                }

                mutex_unlock(&vq->mutex);
        }

        mutex_unlock(&vsock->dev.mutex);
        return 0;

err_vq:
        vq->private_data = NULL;
        mutex_unlock(&vq->mutex);

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
        }
err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                struct vhost_virtqueue *vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
        }

err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
        kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
        struct vhost_virtqueue **vqs;
        struct vhost_vsock *vsock;
        int ret;

        /* This struct is large and allocation could fail, fall back to vmalloc
         * if there is no other way.  Use vzalloc so the fallback starts
         * zeroed like the kzalloc path does: guest_cid == 0 means "no CID
         * assigned yet".
         */
        vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!vsock) {
                vsock = vzalloc(sizeof(*vsock));
                if (!vsock)
                        return -ENOMEM;
        }

        vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                ret = -ENOMEM;
                goto out;
        }

        atomic_set(&vsock->queued_replies, 0);

        vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
        vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

        vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

        file->private_data = vsock;
        spin_lock_init(&vsock->send_pkt_list_lock);
        INIT_LIST_HEAD(&vsock->send_pkt_list);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

        spin_lock_bh(&vhost_vsock_lock);
        list_add_tail(&vsock->list, &vhost_vsock_list);
        spin_unlock_bh(&vhost_vsock_lock);
        return 0;

out:
        vhost_vsock_free(vsock);
        return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
                if (vsock->vqs[i].handle_kick)
                        vhost_poll_flush(&vsock->vqs[i].poll);
        vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}
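
/* Called for every connected socket when a vhost_vsock instance disappears:
 * any socket whose peer CID no longer has a backing instance is marked done
 * and its waiters are woken with ECONNRESET.
 */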

static void vhost_vsock_reset_orphans(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        /* vmci_transport.c doesn't take sk_lock here either.  At least we're
         * under vsock_table_lock so the sock cannot disappear while we're
         * executing.
         */

        if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
                sock_set_flag(sk, SOCK_DONE);
                vsk->peer_shutdown = SHUTDOWN_MASK;
                sk->sk_state = SS_UNCONNECTED;
                sk->sk_err = ECONNRESET;
                sk->sk_error_report(sk);
        }
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
        struct vhost_vsock *vsock = file->private_data;

        spin_lock_bh(&vhost_vsock_lock);
        list_del(&vsock->list);
        spin_unlock_bh(&vhost_vsock_lock);

        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient.  Room for improvement here. */
        vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

        vhost_vsock_stop(vsock);
        vhost_vsock_flush(vsock);
        vhost_dev_stop(&vsock->dev);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        while (!list_empty(&vsock->send_pkt_list)) {
                struct virtio_vsock_pkt *pkt;

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        vhost_dev_cleanup(&vsock->dev, false);
        kfree(vsock->dev.vqs);
        vhost_vsock_free(vsock);
        return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
        struct vhost_vsock *other;

        /* Refuse reserved CIDs */
        if (guest_cid <= VMADDR_CID_HOST ||
            guest_cid == U32_MAX)
                return -EINVAL;

        /* 64-bit CIDs are not yet supported */
        if (guest_cid > U32_MAX)
                return -EINVAL;

        /* Refuse if CID is already in use */
        spin_lock_bh(&vhost_vsock_lock);
        other = __vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                spin_unlock_bh(&vhost_vsock_lock);
                return -EADDRINUSE;
        }
        vsock->guest_cid = guest_cid;
        spin_unlock_bh(&vhost_vsock_lock);

        return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
        struct vhost_virtqueue *vq;
        int i;

        if (features & ~VHOST_VSOCK_FEATURES)
                return -EOPNOTSUPP;

        mutex_lock(&vsock->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vsock->dev)) {
                mutex_unlock(&vsock->dev.mutex);
                return -EFAULT;
        }

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];
                mutex_lock(&vq->mutex);
                vq->acked_features = features;
                mutex_unlock(&vq->mutex);
        }
        mutex_unlock(&vsock->dev.mutex);
        return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                                  unsigned long arg)
{
        struct vhost_vsock *vsock = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 guest_cid;
        u64 features;
        int start;
        int r;

        switch (ioctl) {
        case VHOST_VSOCK_SET_GUEST_CID:
                if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
                        return -EFAULT;
                return vhost_vsock_set_cid(vsock, guest_cid);
        case VHOST_VSOCK_SET_RUNNING:
                if (copy_from_user(&start, argp, sizeof(start)))
                        return -EFAULT;
                if (start)
                        return vhost_vsock_start(vsock);
                else
                        return vhost_vsock_stop(vsock);
        case VHOST_GET_FEATURES:
                features = VHOST_VSOCK_FEATURES;
                if (copy_to_user(argp, &features, sizeof(features)))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                return vhost_vsock_set_features(vsock, features);
        default:
                mutex_lock(&vsock->dev.mutex);
                r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
                else
                        vhost_vsock_flush(vsock);
                mutex_unlock(&vsock->dev.mutex);
                return r;
        }
}
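
/* A rough sketch of the setup sequence a VMM performs against this device;
 * memory-table and vring setup details are elided and the values shown are
 * examples only:
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	... VHOST_SET_MEM_TABLE, VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL ...
 *	u64 cid = 3;	// any CID above VMADDR_CID_HOST and below U32_MAX
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	int running = 1;
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);	// 0 stops the device
 */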

static const struct file_operations vhost_vsock_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vsock_dev_open,
        .release        = vhost_vsock_dev_release,
        .llseek         = noop_llseek,
        .unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "vhost-vsock",
        .fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
        .transport = {
                .get_local_cid            = vhost_transport_get_local_cid,

                .init                     = virtio_transport_do_socket_init,
                .destruct                 = virtio_transport_destruct,
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,
                .cancel_pkt               = vhost_transport_cancel_pkt,

                .dgram_enqueue            = virtio_transport_dgram_enqueue,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
                .dgram_bind               = virtio_transport_dgram_bind,
                .dgram_allow              = virtio_transport_dgram_allow,

                .stream_enqueue           = virtio_transport_stream_enqueue,
                .stream_dequeue           = virtio_transport_stream_dequeue,
                .stream_has_data          = virtio_transport_stream_has_data,
                .stream_has_space         = virtio_transport_stream_has_space,
                .stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
                .stream_is_active         = virtio_transport_stream_is_active,
                .stream_allow             = virtio_transport_stream_allow,

                .notify_poll_in           = virtio_transport_notify_poll_in,
                .notify_poll_out          = virtio_transport_notify_poll_out,
                .notify_recv_init         = virtio_transport_notify_recv_init,
                .notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
                .notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
                .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
                .notify_send_init         = virtio_transport_notify_send_init,
                .notify_send_pre_block    = virtio_transport_notify_send_pre_block,
                .notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
                .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

                .set_buffer_size          = virtio_transport_set_buffer_size,
                .set_min_buffer_size      = virtio_transport_set_min_buffer_size,
                .set_max_buffer_size      = virtio_transport_set_max_buffer_size,
                .get_buffer_size          = virtio_transport_get_buffer_size,
                .get_min_buffer_size      = virtio_transport_get_min_buffer_size,
                .get_max_buffer_size      = virtio_transport_get_max_buffer_size,
        },

        .send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
        int ret;

        ret = vsock_core_init(&vhost_transport.transport);
        if (ret < 0)
                return ret;
        return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
        misc_deregister(&vhost_vsock_misc);
        vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");