  1. /*
  2. * net/tipc/server.c: TIPC server infrastructure
  3. *
  4. * Copyright (c) 2012-2013, Wind River Systems
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions are met:
  9. *
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. * 3. Neither the names of the copyright holders nor the names of its
  16. * contributors may be used to endorse or promote products derived from
  17. * this software without specific prior written permission.
  18. *
  19. * Alternatively, this software may be distributed under the terms of the
  20. * GNU General Public License ("GPL") version 2 as published by the Free
  21. * Software Foundation.
  22. *
  23. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  24. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  25. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  26. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  27. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  28. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  29. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  30. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  31. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  32. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  33. * POSSIBILITY OF SUCH DAMAGE.
  34. */
  35. #include "server.h"
  36. #include "core.h"
  37. #include "socket.h"
  38. #include <net/sock.h>
/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25
/* Number of messages to receive before rescheduling (see tipc_recv_work) */
#define MAX_RECV_MSG_COUNT 25
/* Bit in tipc_conn::flags: set while the connection is usable */
#define CF_CONNECTED 1

/* Map a socket back to the tipc_conn registered in its sk_user_data */
#define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)
/**
 * struct tipc_conn - TIPC connection structure
 * @kref: reference counter to connection object
 * @conid: connection identifier (the idr slot allocated for it)
 * @sock: socket handler associated with connection
 * @flags: indicates connection state (CF_CONNECTED)
 * @server: pointer to connected server
 * @rwork: receive work item
 * @rx_action: what to do when connection socket is active
 * @usr_data: user-specified field
 * @outqueue: list of outbound messages waiting to be sent
 * @outqueue_lock: control access to the outqueue
 * @swork: send work item
 */
struct tipc_conn {
	struct kref kref;
	int conid;
	struct socket *sock;
	unsigned long flags;
	struct tipc_server *server;
	struct work_struct rwork;
	int (*rx_action) (struct tipc_conn *con);
	void *usr_data;
	struct list_head outqueue;
	spinlock_t outqueue_lock;
	struct work_struct swork;
};
/* An entry waiting to be sent */
struct outqueue_entry {
	struct list_head list;		/* link in tipc_conn::outqueue */
	struct kvec iov;		/* owned copy of the message payload */
	struct sockaddr_tipc dest;	/* destination; set only when the sender
					 * supplies an address, used only for
					 * datagram-type sockets
					 */
};
/* Forward declarations: the work handlers and the queue cleanup helper
 * are referenced before their definitions below.
 */
static void tipc_recv_work(struct work_struct *work);
static void tipc_send_work(struct work_struct *work);
static void tipc_clean_outqueues(struct tipc_conn *con);
  81. static void tipc_conn_kref_release(struct kref *kref)
  82. {
  83. struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
  84. if (con->sock) {
  85. tipc_sock_release_local(con->sock);
  86. con->sock = NULL;
  87. }
  88. tipc_clean_outqueues(con);
  89. kfree(con);
  90. }
/* Drop a connection reference; the last put triggers
 * tipc_conn_kref_release() and frees the object.
 */
static void conn_put(struct tipc_conn *con)
{
	kref_put(&con->kref, tipc_conn_kref_release);
}
/* Take an additional reference on the connection */
static void conn_get(struct tipc_conn *con)
{
	kref_get(&con->kref);
}
  99. static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
  100. {
  101. struct tipc_conn *con;
  102. spin_lock_bh(&s->idr_lock);
  103. con = idr_find(&s->conn_idr, conid);
  104. if (con)
  105. conn_get(con);
  106. spin_unlock_bh(&s->idr_lock);
  107. return con;
  108. }
  109. static void sock_data_ready(struct sock *sk)
  110. {
  111. struct tipc_conn *con;
  112. read_lock(&sk->sk_callback_lock);
  113. con = sock2con(sk);
  114. if (con && test_bit(CF_CONNECTED, &con->flags)) {
  115. conn_get(con);
  116. if (!queue_work(con->server->rcv_wq, &con->rwork))
  117. conn_put(con);
  118. }
  119. read_unlock(&sk->sk_callback_lock);
  120. }
  121. static void sock_write_space(struct sock *sk)
  122. {
  123. struct tipc_conn *con;
  124. read_lock(&sk->sk_callback_lock);
  125. con = sock2con(sk);
  126. if (con && test_bit(CF_CONNECTED, &con->flags)) {
  127. conn_get(con);
  128. if (!queue_work(con->server->send_wq, &con->swork))
  129. conn_put(con);
  130. }
  131. read_unlock(&sk->sk_callback_lock);
  132. }
  133. static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
  134. {
  135. struct sock *sk = sock->sk;
  136. write_lock_bh(&sk->sk_callback_lock);
  137. sk->sk_data_ready = sock_data_ready;
  138. sk->sk_write_space = sock_write_space;
  139. sk->sk_user_data = con;
  140. con->sock = sock;
  141. write_unlock_bh(&sk->sk_callback_lock);
  142. }
/* Detach the connection from its socket. Clearing sk_user_data makes
 * sock2con() return NULL, so sock_data_ready()/sock_write_space() stop
 * queueing work for this connection.
 */
static void tipc_unregister_callbacks(struct tipc_conn *con)
{
	struct sock *sk = con->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}
/* Tear a connection down. Safe to call multiple times and from the
 * rx/tx work threads themselves: only the caller that actually clears
 * CF_CONNECTED performs the teardown, every other call is a no-op.
 */
static void tipc_close_conn(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;

	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
		/* conid 0 appears to be the listener (allocated first in
		 * tipc_open_listening_sock), which has no user context to
		 * shut down — TODO confirm.
		 */
		if (con->conid)
			s->tipc_conn_shutdown(con->conid, con->usr_data);
		spin_lock_bh(&s->idr_lock);
		idr_remove(&s->conn_idr, con->conid);
		s->idr_in_use--;
		spin_unlock_bh(&s->idr_lock);

		tipc_unregister_callbacks(con);

		/* We shouldn't flush pending works as we may be in the
		 * thread. In fact the races with pending rx/tx work structs
		 * are harmless for us here as we have already deleted this
		 * connection from server connection list and set
		 * sk->sk_user_data to 0 before releasing connection object.
		 */
		kernel_sock_shutdown(con->sock, SHUT_RDWR);

		/* Drop the reference created by tipc_alloc_conn() */
		conn_put(con);
	}
}
  171. static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
  172. {
  173. struct tipc_conn *con;
  174. int ret;
  175. con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
  176. if (!con)
  177. return ERR_PTR(-ENOMEM);
  178. kref_init(&con->kref);
  179. INIT_LIST_HEAD(&con->outqueue);
  180. spin_lock_init(&con->outqueue_lock);
  181. INIT_WORK(&con->swork, tipc_send_work);
  182. INIT_WORK(&con->rwork, tipc_recv_work);
  183. spin_lock_bh(&s->idr_lock);
  184. ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
  185. if (ret < 0) {
  186. kfree(con);
  187. spin_unlock_bh(&s->idr_lock);
  188. return ERR_PTR(-ENOMEM);
  189. }
  190. con->conid = ret;
  191. s->idr_in_use++;
  192. spin_unlock_bh(&s->idr_lock);
  193. set_bit(CF_CONNECTED, &con->flags);
  194. con->server = s;
  195. return con;
  196. }
  197. static int tipc_receive_from_sock(struct tipc_conn *con)
  198. {
  199. struct msghdr msg = {};
  200. struct tipc_server *s = con->server;
  201. struct sockaddr_tipc addr;
  202. struct kvec iov;
  203. void *buf;
  204. int ret;
  205. buf = kmem_cache_alloc(s->rcvbuf_cache, GFP_ATOMIC);
  206. if (!buf) {
  207. ret = -ENOMEM;
  208. goto out_close;
  209. }
  210. iov.iov_base = buf;
  211. iov.iov_len = s->max_rcvbuf_size;
  212. msg.msg_name = &addr;
  213. ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
  214. MSG_DONTWAIT);
  215. if (ret <= 0) {
  216. kmem_cache_free(s->rcvbuf_cache, buf);
  217. goto out_close;
  218. }
  219. s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
  220. con->usr_data, buf, ret);
  221. kmem_cache_free(s->rcvbuf_cache, buf);
  222. return 0;
  223. out_close:
  224. if (ret != -EWOULDBLOCK)
  225. tipc_close_conn(con);
  226. else if (ret == 0)
  227. /* Don't return success if we really got EOF */
  228. ret = -EAGAIN;
  229. return ret;
  230. }
/* rx_action for the listening socket: accept one pending connection,
 * wrap it in a new tipc_conn and notify the server via tipc_conn_new.
 * Returns 0 on success or a negative errno from accept/alloc.
 */
static int tipc_accept_from_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = con->sock;
	struct socket *newsock;
	struct tipc_conn *newcon;
	int ret;

	ret = tipc_sock_accept_local(sock, &newsock, O_NONBLOCK);
	if (ret < 0)
		return ret;

	newcon = tipc_alloc_conn(con->server);
	if (IS_ERR(newcon)) {
		ret = PTR_ERR(newcon);
		sock_release(newsock);
		return ret;
	}

	newcon->rx_action = tipc_receive_from_sock;
	tipc_register_callbacks(newsock, newcon);

	/* Notify that new connection is incoming */
	newcon->usr_data = s->tipc_conn_new(newcon->conid);
	/* NOTE(review): if tipc_conn_new() can fail and return NULL, the
	 * connection continues with a NULL usr_data — confirm the callback
	 * cannot fail, or add an error path here.
	 */

	/* Wake up receive process in case of 'SYN+' message */
	newsock->sk->sk_data_ready(newsock->sk);
	return ret;
}
/* Create, configure and bind the server's listening/receiving socket.
 *
 * Stream and seqpacket servers get a listening socket whose rx_action
 * accepts new connections; datagram (DGRAM/RDM) servers receive data
 * directly on this socket. Returns the socket or NULL on any failure
 * (the partially-created socket is released on the error path).
 */
static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = NULL;
	int ret;

	ret = tipc_sock_create_local(s->net, s->type, &sock);
	if (ret < 0)
		return NULL;
	ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
				(char *)&s->imp, sizeof(s->imp));
	if (ret < 0)
		goto create_err;
	ret = kernel_bind(sock, (struct sockaddr *)s->saddr, sizeof(*s->saddr));
	if (ret < 0)
		goto create_err;

	/* The socket type decides how socket activity is handled */
	switch (s->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		con->rx_action = tipc_accept_from_sock;

		ret = kernel_listen(sock, 0);
		if (ret < 0)
			goto create_err;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		con->rx_action = tipc_receive_from_sock;
		break;
	default:
		pr_err("Unknown socket type %d\n", s->type);
		goto create_err;
	}
	return sock;

create_err:
	sock_release(sock);
	con->sock = NULL;
	return NULL;
}
  292. static int tipc_open_listening_sock(struct tipc_server *s)
  293. {
  294. struct socket *sock;
  295. struct tipc_conn *con;
  296. con = tipc_alloc_conn(s);
  297. if (IS_ERR(con))
  298. return PTR_ERR(con);
  299. sock = tipc_create_listen_sock(con);
  300. if (!sock) {
  301. idr_remove(&s->conn_idr, con->conid);
  302. s->idr_in_use--;
  303. kfree(con);
  304. return -EINVAL;
  305. }
  306. tipc_register_callbacks(sock, con);
  307. return 0;
  308. }
  309. static struct outqueue_entry *tipc_alloc_entry(void *data, int len)
  310. {
  311. struct outqueue_entry *entry;
  312. void *buf;
  313. entry = kmalloc(sizeof(struct outqueue_entry), GFP_ATOMIC);
  314. if (!entry)
  315. return NULL;
  316. buf = kmalloc(len, GFP_ATOMIC);
  317. if (!buf) {
  318. kfree(entry);
  319. return NULL;
  320. }
  321. memcpy(buf, data, len);
  322. entry->iov.iov_base = buf;
  323. entry->iov.iov_len = len;
  324. return entry;
  325. }
/* Free one outqueue entry together with its copied payload */
static void tipc_free_entry(struct outqueue_entry *e)
{
	kfree(e->iov.iov_base);
	kfree(e);
}
/* Discard every message still queued for transmission on @con */
static void tipc_clean_outqueues(struct tipc_conn *con)
{
	struct outqueue_entry *e, *safe;

	spin_lock_bh(&con->outqueue_lock);
	list_for_each_entry_safe(e, safe, &con->outqueue, list) {
		list_del(&e->list);
		tipc_free_entry(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
}
  341. int tipc_conn_sendmsg(struct tipc_server *s, int conid,
  342. struct sockaddr_tipc *addr, void *data, size_t len)
  343. {
  344. struct outqueue_entry *e;
  345. struct tipc_conn *con;
  346. con = tipc_conn_lookup(s, conid);
  347. if (!con)
  348. return -EINVAL;
  349. e = tipc_alloc_entry(data, len);
  350. if (!e) {
  351. conn_put(con);
  352. return -ENOMEM;
  353. }
  354. if (addr)
  355. memcpy(&e->dest, addr, sizeof(struct sockaddr_tipc));
  356. spin_lock_bh(&con->outqueue_lock);
  357. list_add_tail(&e->list, &con->outqueue);
  358. spin_unlock_bh(&con->outqueue_lock);
  359. if (test_bit(CF_CONNECTED, &con->flags)) {
  360. if (!queue_work(s->send_wq, &con->swork))
  361. conn_put(con);
  362. } else {
  363. conn_put(con);
  364. }
  365. return 0;
  366. }
  367. void tipc_conn_terminate(struct tipc_server *s, int conid)
  368. {
  369. struct tipc_conn *con;
  370. con = tipc_conn_lookup(s, conid);
  371. if (con) {
  372. tipc_close_conn(con);
  373. conn_put(con);
  374. }
  375. }
/* Transmit queued messages on the connection's socket.
 *
 * Runs from the send workqueue with the work item's connection
 * reference held. The outqueue lock is dropped around the blocking-ish
 * sendmsg call and re-taken to unlink each entry after it is sent.
 */
static void tipc_send_to_sock(struct tipc_conn *con)
{
	int count = 0;
	struct tipc_server *s = con->server;
	struct outqueue_entry *e;
	struct msghdr msg;
	int ret;

	spin_lock_bh(&con->outqueue_lock);
	while (1) {
		e = list_entry(con->outqueue.next, struct outqueue_entry,
			       list);
		/* Empty queue: list_entry() of the head yields the head */
		if ((struct list_head *) e == &con->outqueue)
			break;
		spin_unlock_bh(&con->outqueue_lock);
		memset(&msg, 0, sizeof(msg));
		msg.msg_flags = MSG_DONTWAIT;
		/* Datagram sockets need an explicit destination address */
		if (s->type == SOCK_DGRAM || s->type == SOCK_RDM) {
			msg.msg_name = &e->dest;
			msg.msg_namelen = sizeof(struct sockaddr_tipc);
		}
		ret = kernel_sendmsg(con->sock, &msg, &e->iov, 1,
				     e->iov.iov_len);
		if (ret == -EWOULDBLOCK || ret == 0) {
			/* No write space: leave the entry queued; the
			 * sock_write_space() callback requeues the send work.
			 */
			cond_resched();
			goto out;
		} else if (ret < 0) {
			goto send_err;
		}

		/* Don't starve users filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock_bh(&con->outqueue_lock);
		list_del(&e->list);
		tipc_free_entry(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
out:
	return;

send_err:
	tipc_close_conn(con);
}
  419. static void tipc_recv_work(struct work_struct *work)
  420. {
  421. struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
  422. int count = 0;
  423. while (test_bit(CF_CONNECTED, &con->flags)) {
  424. if (con->rx_action(con))
  425. break;
  426. /* Don't flood Rx machine */
  427. if (++count >= MAX_RECV_MSG_COUNT) {
  428. cond_resched();
  429. count = 0;
  430. }
  431. }
  432. conn_put(con);
  433. }
  434. static void tipc_send_work(struct work_struct *work)
  435. {
  436. struct tipc_conn *con = container_of(work, struct tipc_conn, swork);
  437. if (test_bit(CF_CONNECTED, &con->flags))
  438. tipc_send_to_sock(con);
  439. conn_put(con);
  440. }
/* Destroy both workqueues; destroy_workqueue() drains outstanding
 * rx/tx work items before returning.
 */
static void tipc_work_stop(struct tipc_server *s)
{
	destroy_workqueue(s->rcv_wq);
	destroy_workqueue(s->send_wq);
}
  446. static int tipc_work_start(struct tipc_server *s)
  447. {
  448. s->rcv_wq = alloc_workqueue("tipc_rcv", WQ_UNBOUND, 1);
  449. if (!s->rcv_wq) {
  450. pr_err("can't start tipc receive workqueue\n");
  451. return -ENOMEM;
  452. }
  453. s->send_wq = alloc_workqueue("tipc_send", WQ_UNBOUND, 1);
  454. if (!s->send_wq) {
  455. pr_err("can't start tipc send workqueue\n");
  456. destroy_workqueue(s->rcv_wq);
  457. return -ENOMEM;
  458. }
  459. return 0;
  460. }
  461. int tipc_server_start(struct tipc_server *s)
  462. {
  463. int ret;
  464. spin_lock_init(&s->idr_lock);
  465. idr_init(&s->conn_idr);
  466. s->idr_in_use = 0;
  467. s->rcvbuf_cache = kmem_cache_create(s->name, s->max_rcvbuf_size,
  468. 0, SLAB_HWCACHE_ALIGN, NULL);
  469. if (!s->rcvbuf_cache)
  470. return -ENOMEM;
  471. ret = tipc_work_start(s);
  472. if (ret < 0) {
  473. kmem_cache_destroy(s->rcvbuf_cache);
  474. return ret;
  475. }
  476. ret = tipc_open_listening_sock(s);
  477. if (ret < 0) {
  478. tipc_work_stop(s);
  479. kmem_cache_destroy(s->rcvbuf_cache);
  480. return ret;
  481. }
  482. return ret;
  483. }
/* Shut the server down: close every live connection, then release the
 * workqueues, the receive-buffer cache and the connection idr.
 */
void tipc_server_stop(struct tipc_server *s)
{
	struct tipc_conn *con;
	int total = 0;
	int id;

	spin_lock_bh(&s->idr_lock);
	/* Walk ids upward until as many connections have been visited as
	 * are recorded in idr_in_use. The lock must be dropped around
	 * tipc_close_conn(), which takes idr_lock itself to remove the
	 * connection from the idr.
	 */
	for (id = 0; total < s->idr_in_use; id++) {
		con = idr_find(&s->conn_idr, id);
		if (con) {
			total++;
			spin_unlock_bh(&s->idr_lock);
			tipc_close_conn(con);
			spin_lock_bh(&s->idr_lock);
		}
	}
	spin_unlock_bh(&s->idr_lock);

	tipc_work_stop(s);
	kmem_cache_destroy(s->rcvbuf_cache);
	idr_destroy(&s->conn_idr);
}