vsock.c

/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <net/af_vsock.h>

#include "vhost.h"

/* The host always has CID 2 (VMADDR_CID_HOST) */
#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

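/* Note on naming: VSOCK_VQ_TX and VSOCK_VQ_RX index vqs[] from the
 * guest's point of view.  The guest's TX queue carries guest->host
 * packets, which the tx kick handler below consumes; the guest's RX
 * queue is where vhost_transport_do_send_pkt() places host->guest
 * packets.
 */
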
static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Caller must hold vhost_vsock_lock */
static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	vsock = __vhost_vsock_get(guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return vsock;
}

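/* Move packets queued on vsock->send_pkt_list into the guest's RX
 * virtqueue.  Runs in vhost worker context, either from send_pkt_work
 * or from the RX kick handler when the guest posts fresh buffers.
 */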
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

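/* The .send_pkt callback: queue a packet for delivery to the guest and
 * return pkt->len, i.e. the number of bytes accepted.  Actual delivery
 * happens asynchronously in send_pkt_work once the guest has RX
 * buffers available.
 */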
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}

static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	LIST_HEAD(freeme);

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		return -ENODEV;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		/* If removing these replies dropped queued_replies back below
		 * tx_vq->num, tx processing had been throttled by
		 * vhost_vsock_more_replies(); kick it so it resumes.
		 */
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	return 0;
}

static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

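/* queued_replies caps the number of pending host->guest reply packets
 * at the size of the guest's tx virtqueue.  Without it, a guest that
 * floods the host with requests but never drains its own RX queue
 * could make the host buffer an unbounded number of replies on
 * send_pkt_list.
 */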
/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		/* Save len now: virtio_transport_recv_pkt() and
		 * virtio_transport_free_pkt() may free pkt before
		 * vhost_add_used() below needs it.
		 */
		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

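/* Wait for all queued vhost work to complete: the per-virtqueue kick
 * handlers and send_pkt_work.  Once this returns, no work item queued
 * before the call can still be running.
 */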
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here.
	 */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs: 0-2 (hypervisor, reserved, host) and
	 * U32_MAX (VMADDR_CID_ANY)
	 */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = __vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

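/* A minimal sketch of the userspace side of this ioctl interface, for
 * orientation only (not part of the driver; error handling, feature
 * negotiation, and vring/memory setup by the VMM are omitted):
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;	// any CID in [3, U32_MAX - 1]
 *	int running = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER);			// claim the device
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);	// -> vhost_vsock_set_cid()
 *	// ... VHOST_SET_FEATURES, VHOST_SET_MEM_TABLE, vring ioctls ...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);	// -> vhost_vsock_start()
 */
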
static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

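/* Most callbacks are shared with the guest-side virtio transport; only
 * CID lookup, packet cancellation, and packet transmission are vhost
 * specific.  The dgram_* callbacks exist to satisfy the vsock core but
 * the common virtio transport code rejects datagram operations.
 */
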
static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");