  1. /*
  2. * net/tipc/server.c: TIPC server infrastructure
  3. *
  4. * Copyright (c) 2012-2013, Wind River Systems
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions are met:
  9. *
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. * 3. Neither the names of the copyright holders nor the names of its
  16. * contributors may be used to endorse or promote products derived from
  17. * this software without specific prior written permission.
  18. *
  19. * Alternatively, this software may be distributed under the terms of the
  20. * GNU General Public License ("GPL") version 2 as published by the Free
  21. * Software Foundation.
  22. *
  23. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  24. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  25. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  26. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  27. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  28. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  29. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  30. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  31. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  32. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  33. * POSSIBILITY OF SUCH DAMAGE.
  34. */
  35. #include "server.h"
  36. #include "core.h"
  37. #include "socket.h"
  38. #include "addr.h"
  39. #include "msg.h"
  40. #include <net/sock.h>
  41. #include <linux/module.h>
  42. /* Number of messages to send before rescheduling */
  43. #define MAX_SEND_MSG_COUNT 25
  44. #define MAX_RECV_MSG_COUNT 25
  45. #define CF_CONNECTED 1
  46. #define CF_SERVER 2
  47. #define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)
/**
 * struct tipc_conn - TIPC connection structure
 * @kref: reference counter to connection object
 * @conid: connection identifier
 * @sock: socket handler associated with connection
 * @flags: indicates connection state (CF_CONNECTED, CF_SERVER)
 * @server: pointer to connected server
 * @rwork: receive work item
 * @rx_action: what to do when connection socket is active
 * @usr_data: user-specified field
 * @outqueue: list of outbound message entries (struct outqueue_entry)
 * @outqueue_lock: control access to the outqueue
 * @swork: send work item
 */
struct tipc_conn {
	struct kref kref;
	int conid;
	struct socket *sock;
	unsigned long flags;
	struct tipc_server *server;
	struct work_struct rwork;
	int (*rx_action) (struct tipc_conn *con);
	void *usr_data;
	struct list_head outqueue;
	spinlock_t outqueue_lock;
	struct work_struct swork;
};
/* An entry waiting to be sent: owns a private copy of the payload
 * (iov.iov_base) and the destination address used for datagram sockets.
 */
struct outqueue_entry {
	struct list_head list;
	struct kvec iov;
	struct sockaddr_tipc dest;
};
  82. static void tipc_recv_work(struct work_struct *work);
  83. static void tipc_send_work(struct work_struct *work);
  84. static void tipc_clean_outqueues(struct tipc_conn *con);
/* Final teardown, run when the last reference to the connection is
 * dropped.  Releases the socket, unregisters the conid and frees the
 * connection object.
 */
static void tipc_conn_kref_release(struct kref *kref)
{
	struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
	struct tipc_server *s = con->server;
	struct sockaddr_tipc *saddr = s->saddr;
	struct socket *sock = con->sock;
	struct sock *sk;

	if (sock) {
		sk = sock->sk;
		if (test_bit(CF_SERVER, &con->flags)) {
			/* Restore the two module references dropped in
			 * tipc_create_listen_sock() so that sock_release()
			 * below rebalances the owner refcounts.
			 */
			__module_get(sock->ops->owner);
			__module_get(sk->sk_prot_creator->owner);
		}
		/* Rebind with negated scope — TIPC convention to withdraw
		 * the server's published name before closing the socket.
		 */
		saddr->scope = -TIPC_NODE_SCOPE;
		kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
		sock_release(sock);
		con->sock = NULL;
	}

	spin_lock_bh(&s->idr_lock);
	idr_remove(&s->conn_idr, con->conid);
	s->idr_in_use--;
	spin_unlock_bh(&s->idr_lock);
	tipc_clean_outqueues(con);
	kfree(con);
}
/* Drop a connection reference; frees the connection via
 * tipc_conn_kref_release() when the last reference goes away.
 */
static void conn_put(struct tipc_conn *con)
{
	kref_put(&con->kref, tipc_conn_kref_release);
}
/* Take an additional reference on the connection. */
static void conn_get(struct tipc_conn *con)
{
	kref_get(&con->kref);
}
  118. static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
  119. {
  120. struct tipc_conn *con;
  121. spin_lock_bh(&s->idr_lock);
  122. con = idr_find(&s->conn_idr, conid);
  123. if (con && test_bit(CF_CONNECTED, &con->flags))
  124. conn_get(con);
  125. else
  126. con = NULL;
  127. spin_unlock_bh(&s->idr_lock);
  128. return con;
  129. }
/* Socket data-ready callback (may run in bh context, hence *_bh locking).
 * Takes a connection reference that is handed over to the queued rx work
 * item (dropped in tipc_recv_work()); if the work was already pending,
 * the extra reference is dropped right away.
 */
static void sock_data_ready(struct sock *sk)
{
	struct tipc_conn *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && test_bit(CF_CONNECTED, &con->flags)) {
		conn_get(con);
		if (!queue_work(con->server->rcv_wq, &con->rwork))
			conn_put(con);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}
  142. static void sock_write_space(struct sock *sk)
  143. {
  144. struct tipc_conn *con;
  145. read_lock_bh(&sk->sk_callback_lock);
  146. con = sock2con(sk);
  147. if (con && test_bit(CF_CONNECTED, &con->flags)) {
  148. conn_get(con);
  149. if (!queue_work(con->server->send_wq, &con->swork))
  150. conn_put(con);
  151. }
  152. read_unlock_bh(&sk->sk_callback_lock);
  153. }
  154. static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
  155. {
  156. struct sock *sk = sock->sk;
  157. write_lock_bh(&sk->sk_callback_lock);
  158. sk->sk_data_ready = sock_data_ready;
  159. sk->sk_write_space = sock_write_space;
  160. sk->sk_user_data = con;
  161. con->sock = sock;
  162. write_unlock_bh(&sk->sk_callback_lock);
  163. }
/* Detach the connection from its socket: once sk_user_data is NULL,
 * sock2con() returns NULL and the data-ready/write-space hooks become
 * no-ops for this connection.
 */
static void tipc_unregister_callbacks(struct tipc_conn *con)
{
	struct sock *sk = con->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}
/* Tear down a connection exactly once; CF_CONNECTED serves as the
 * close-once latch.  Drops the initial reference taken by
 * tipc_alloc_conn()'s kref_init().
 */
static void tipc_close_conn(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;

	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
		if (con->sock)
			tipc_unregister_callbacks(con);

		/* NOTE(review): conid 0 is skipped here — presumably the
		 * listener socket, which has no per-user state to release;
		 * this relies on the listener always receiving id 0 from
		 * idr_alloc() — confirm.
		 */
		if (con->conid)
			s->tipc_conn_release(con->conid, con->usr_data);

		/* We shouldn't flush pending works as we may be in the
		 * thread. In fact the races with pending rx/tx work structs
		 * are harmless for us here as we have already deleted this
		 * connection from server connection list.
		 */
		if (con->sock)
			kernel_sock_shutdown(con->sock, SHUT_RDWR);

		conn_put(con);
	}
}
  189. static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
  190. {
  191. struct tipc_conn *con;
  192. int ret;
  193. con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
  194. if (!con)
  195. return ERR_PTR(-ENOMEM);
  196. kref_init(&con->kref);
  197. INIT_LIST_HEAD(&con->outqueue);
  198. spin_lock_init(&con->outqueue_lock);
  199. INIT_WORK(&con->swork, tipc_send_work);
  200. INIT_WORK(&con->rwork, tipc_recv_work);
  201. spin_lock_bh(&s->idr_lock);
  202. ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
  203. if (ret < 0) {
  204. kfree(con);
  205. spin_unlock_bh(&s->idr_lock);
  206. return ERR_PTR(-ENOMEM);
  207. }
  208. con->conid = ret;
  209. s->idr_in_use++;
  210. spin_unlock_bh(&s->idr_lock);
  211. set_bit(CF_CONNECTED, &con->flags);
  212. con->server = s;
  213. return con;
  214. }
  215. static int tipc_receive_from_sock(struct tipc_conn *con)
  216. {
  217. struct msghdr msg = {};
  218. struct tipc_server *s = con->server;
  219. struct sockaddr_tipc addr;
  220. struct kvec iov;
  221. void *buf;
  222. int ret;
  223. buf = kmem_cache_alloc(s->rcvbuf_cache, GFP_ATOMIC);
  224. if (!buf) {
  225. ret = -ENOMEM;
  226. goto out_close;
  227. }
  228. iov.iov_base = buf;
  229. iov.iov_len = s->max_rcvbuf_size;
  230. msg.msg_name = &addr;
  231. iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
  232. ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
  233. if (ret <= 0) {
  234. kmem_cache_free(s->rcvbuf_cache, buf);
  235. goto out_close;
  236. }
  237. s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
  238. con->usr_data, buf, ret);
  239. kmem_cache_free(s->rcvbuf_cache, buf);
  240. return 0;
  241. out_close:
  242. if (ret != -EWOULDBLOCK)
  243. tipc_close_conn(con);
  244. else if (ret == 0)
  245. /* Don't return success if we really got EOF */
  246. ret = -EAGAIN;
  247. return ret;
  248. }
  249. static int tipc_accept_from_sock(struct tipc_conn *con)
  250. {
  251. struct tipc_server *s = con->server;
  252. struct socket *sock = con->sock;
  253. struct socket *newsock;
  254. struct tipc_conn *newcon;
  255. int ret;
  256. ret = kernel_accept(sock, &newsock, O_NONBLOCK);
  257. if (ret < 0)
  258. return ret;
  259. newcon = tipc_alloc_conn(con->server);
  260. if (IS_ERR(newcon)) {
  261. ret = PTR_ERR(newcon);
  262. sock_release(newsock);
  263. return ret;
  264. }
  265. newcon->rx_action = tipc_receive_from_sock;
  266. tipc_register_callbacks(newsock, newcon);
  267. /* Notify that new connection is incoming */
  268. newcon->usr_data = s->tipc_conn_new(newcon->conid);
  269. if (!newcon->usr_data) {
  270. sock_release(newsock);
  271. conn_put(newcon);
  272. return -ENOMEM;
  273. }
  274. /* Wake up receive process in case of 'SYN+' message */
  275. newsock->sk->sk_data_ready(newsock->sk);
  276. return ret;
  277. }
/* Create, configure and bind the server's listening (or receiving)
 * socket according to the server's socket type.  Returns the socket on
 * success, or NULL on failure (the specific error is not propagated).
 */
static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = NULL;
	int ret;

	ret = sock_create_kern(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock);
	if (ret < 0)
		return NULL;
	ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
				(char *)&s->imp, sizeof(s->imp));
	if (ret < 0)
		goto create_err;
	ret = kernel_bind(sock, (struct sockaddr *)s->saddr, sizeof(*s->saddr));
	if (ret < 0)
		goto create_err;

	/* Connection-oriented types listen and accept; datagram types
	 * receive directly on this socket.
	 */
	switch (s->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		con->rx_action = tipc_accept_from_sock;

		ret = kernel_listen(sock, 0);
		if (ret < 0)
			goto create_err;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		con->rx_action = tipc_receive_from_sock;
		break;
	default:
		pr_err("Unknown socket type %d\n", s->type);
		goto create_err;
	}

	/* As server's listening socket owner and creator is the same module,
	 * we have to decrease TIPC module reference count to guarantee that
	 * it remains zero after the server socket is created, otherwise,
	 * executing "rmmod" command is unable to make TIPC module deleted
	 * after TIPC module is inserted successfully.
	 *
	 * However, the reference count is ever increased twice in
	 * sock_create_kern(): one is to increase the reference count of owner
	 * of TIPC socket's proto_ops struct; another is to increment the
	 * reference count of owner of TIPC proto struct. Therefore, we must
	 * decrement the module reference count twice to ensure that it keeps
	 * zero after server's listening socket is created. Of course, we
	 * must bump the module reference count twice as well before the socket
	 * is closed.
	 */
	module_put(sock->ops->owner);
	module_put(sock->sk->sk_prot_creator->owner);
	/* CF_SERVER tells tipc_conn_kref_release() to re-take the refs */
	set_bit(CF_SERVER, &con->flags);

	return sock;

create_err:
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
	return NULL;
}
  333. static int tipc_open_listening_sock(struct tipc_server *s)
  334. {
  335. struct socket *sock;
  336. struct tipc_conn *con;
  337. con = tipc_alloc_conn(s);
  338. if (IS_ERR(con))
  339. return PTR_ERR(con);
  340. sock = tipc_create_listen_sock(con);
  341. if (!sock) {
  342. idr_remove(&s->conn_idr, con->conid);
  343. s->idr_in_use--;
  344. kfree(con);
  345. return -EINVAL;
  346. }
  347. tipc_register_callbacks(sock, con);
  348. return 0;
  349. }
  350. static struct outqueue_entry *tipc_alloc_entry(void *data, int len)
  351. {
  352. struct outqueue_entry *entry;
  353. void *buf;
  354. entry = kmalloc(sizeof(struct outqueue_entry), GFP_ATOMIC);
  355. if (!entry)
  356. return NULL;
  357. buf = kmemdup(data, len, GFP_ATOMIC);
  358. if (!buf) {
  359. kfree(entry);
  360. return NULL;
  361. }
  362. entry->iov.iov_base = buf;
  363. entry->iov.iov_len = len;
  364. return entry;
  365. }
/* Free an outqueue entry together with the payload buffer it owns. */
static void tipc_free_entry(struct outqueue_entry *e)
{
	kfree(e->iov.iov_base);
	kfree(e);
}
  371. static void tipc_clean_outqueues(struct tipc_conn *con)
  372. {
  373. struct outqueue_entry *e, *safe;
  374. spin_lock_bh(&con->outqueue_lock);
  375. list_for_each_entry_safe(e, safe, &con->outqueue, list) {
  376. list_del(&e->list);
  377. tipc_free_entry(e);
  378. }
  379. spin_unlock_bh(&con->outqueue_lock);
  380. }
/**
 * tipc_conn_sendmsg - queue a message on a connection's outqueue
 * @s: server owning the connection
 * @conid: connection identifier
 * @addr: destination address (used by datagram sockets), may be NULL
 * @data: payload; copied into a private buffer
 * @len: payload length in bytes
 *
 * Returns 0 on success (including the race where the connection closed
 * after lookup), -EINVAL if no such live connection, -ENOMEM if the
 * entry cannot be allocated.
 */
int tipc_conn_sendmsg(struct tipc_server *s, int conid,
		      struct sockaddr_tipc *addr, void *data, size_t len)
{
	struct outqueue_entry *e;
	struct tipc_conn *con;

	con = tipc_conn_lookup(s, conid);
	if (!con)
		return -EINVAL;

	/* Re-check: the connection may have closed since the lookup */
	if (!test_bit(CF_CONNECTED, &con->flags)) {
		conn_put(con);
		return 0;
	}

	e = tipc_alloc_entry(data, len);
	if (!e) {
		conn_put(con);
		return -ENOMEM;
	}

	if (addr)
		memcpy(&e->dest, addr, sizeof(struct sockaddr_tipc));

	spin_lock_bh(&con->outqueue_lock);
	list_add_tail(&e->list, &con->outqueue);
	spin_unlock_bh(&con->outqueue_lock);

	/* The lookup reference is inherited by the send worker
	 * (tipc_send_work() drops it); if the work was already pending,
	 * drop it here instead.
	 */
	if (!queue_work(s->send_wq, &con->swork))
		conn_put(con);
	return 0;
}
  407. void tipc_conn_terminate(struct tipc_server *s, int conid)
  408. {
  409. struct tipc_conn *con;
  410. con = tipc_conn_lookup(s, conid);
  411. if (con) {
  412. tipc_close_conn(con);
  413. conn_put(con);
  414. }
  415. }
/* Create an in-kernel subscription for name-table events on the range
 * [@lower, @upper] of name @type, on behalf of socket @port.  A
 * socket-less connection is allocated to carry the subscription;
 * its id is returned through *@conid for later unsubscribe.
 * Returns true on success.
 */
bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
			     u32 lower, u32 upper, int *conid)
{
	struct tipc_subscriber *scbr;
	struct tipc_subscr sub;
	struct tipc_server *s;
	struct tipc_conn *con;

	sub.seq.type = type;
	sub.seq.lower = lower;
	sub.seq.upper = upper;
	sub.timeout = TIPC_WAIT_FOREVER;
	sub.filter = TIPC_SUB_PORTS;
	/* Stash the subscriber's port number in the opaque handle field;
	 * tipc_send_kern_top_evt() reads it back the same way.
	 */
	*(u32 *)&sub.usr_handle = port;

	con = tipc_alloc_conn(tipc_topsrv(net));
	if (IS_ERR(con))
		return false;

	*conid = con->conid;
	s = con->server;
	scbr = s->tipc_conn_new(*conid);
	if (!scbr) {
		conn_put(con);
		return false;
	}

	con->usr_data = scbr;
	/* No socket: tipc_send_to_sock() routes events through
	 * tipc_send_kern_top_evt() instead.
	 */
	con->sock = NULL;
	s->tipc_conn_recvmsg(net, *conid, NULL, scbr, &sub, sizeof(sub));
	return true;
}
  444. void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
  445. {
  446. struct tipc_conn *con;
  447. con = tipc_conn_lookup(tipc_topsrv(net), conid);
  448. if (!con)
  449. return;
  450. tipc_close_conn(con);
  451. conn_put(con);
  452. }
/* Deliver a topology event to an in-kernel subscriber: build a TOP_SRV
 * message addressed from this node to the subscriber's own port (taken
 * from the usr_handle set in tipc_topsrv_kern_subscr()) and inject it
 * into the local socket receive path.
 */
static void tipc_send_kern_top_evt(struct net *net, struct tipc_event *evt)
{
	u32 port = *(u32 *)&evt->s.usr_handle;
	u32 self = tipc_own_addr(net);
	struct sk_buff_head evtq;
	struct sk_buff *skb;

	skb = tipc_msg_create(TOP_SRV, 0, INT_H_SIZE, sizeof(*evt),
			      self, self, port, port, 0);
	if (!skb)
		return;
	/* Best-effort delivery: drop silently if the destination is gone */
	msg_set_dest_droppable(buf_msg(skb), true);
	memcpy(msg_data(buf_msg(skb)), evt, sizeof(*evt));
	skb_queue_head_init(&evtq);
	__skb_queue_tail(&evtq, skb);
	tipc_sk_rcv(net, &evtq);
}
/* Drain the connection's outqueue.  The queue lock is dropped around
 * each send and re-taken only to unlink the entry, so producers can
 * append concurrently; an entry is removed only after it has been
 * delivered.
 */
static void tipc_send_to_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct outqueue_entry *e;
	struct tipc_event *evt;
	struct msghdr msg;
	int count = 0;
	int ret;

	spin_lock_bh(&con->outqueue_lock);
	while (test_bit(CF_CONNECTED, &con->flags)) {
		e = list_entry(con->outqueue.next, struct outqueue_entry,
			       list);
		if ((struct list_head *) e == &con->outqueue)
			break;	/* queue is empty */

		spin_unlock_bh(&con->outqueue_lock);

		if (con->sock) {
			memset(&msg, 0, sizeof(msg));
			msg.msg_flags = MSG_DONTWAIT;
			/* Datagram sockets need an explicit destination */
			if (s->type == SOCK_DGRAM || s->type == SOCK_RDM) {
				msg.msg_name = &e->dest;
				msg.msg_namelen = sizeof(struct sockaddr_tipc);
			}
			ret = kernel_sendmsg(con->sock, &msg, &e->iov, 1,
					     e->iov.iov_len);
			if (ret == -EWOULDBLOCK || ret == 0) {
				/* Socket full: leave the entry queued;
				 * sock_write_space() reschedules us.
				 */
				cond_resched();
				goto out;
			} else if (ret < 0) {
				goto send_err;
			}
		} else {
			/* Socket-less connection: in-kernel subscriber */
			evt = e->iov.iov_base;
			tipc_send_kern_top_evt(s->net, evt);
		}

		/* Don't starve users filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock_bh(&con->outqueue_lock);
		list_del(&e->list);
		tipc_free_entry(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
out:
	return;

send_err:
	tipc_close_conn(con);
}
  517. static void tipc_recv_work(struct work_struct *work)
  518. {
  519. struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
  520. int count = 0;
  521. while (test_bit(CF_CONNECTED, &con->flags)) {
  522. if (con->rx_action(con))
  523. break;
  524. /* Don't flood Rx machine */
  525. if (++count >= MAX_RECV_MSG_COUNT) {
  526. cond_resched();
  527. count = 0;
  528. }
  529. }
  530. conn_put(con);
  531. }
/* Work-queue handler for outbound traffic: drain the outqueue while
 * still connected, then drop the reference taken when the work was
 * queued (by sock_write_space() or tipc_conn_sendmsg()).
 */
static void tipc_send_work(struct work_struct *work)
{
	struct tipc_conn *con = container_of(work, struct tipc_conn, swork);

	if (test_bit(CF_CONNECTED, &con->flags))
		tipc_send_to_sock(con);

	conn_put(con);
}
/* Destroy both per-server workqueues (destroy_workqueue() drains any
 * pending work items first).
 */
static void tipc_work_stop(struct tipc_server *s)
{
	destroy_workqueue(s->rcv_wq);
	destroy_workqueue(s->send_wq);
}
  544. static int tipc_work_start(struct tipc_server *s)
  545. {
  546. s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
  547. if (!s->rcv_wq) {
  548. pr_err("can't start tipc receive workqueue\n");
  549. return -ENOMEM;
  550. }
  551. s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
  552. if (!s->send_wq) {
  553. pr_err("can't start tipc send workqueue\n");
  554. destroy_workqueue(s->rcv_wq);
  555. return -ENOMEM;
  556. }
  557. return 0;
  558. }
  559. int tipc_server_start(struct tipc_server *s)
  560. {
  561. int ret;
  562. spin_lock_init(&s->idr_lock);
  563. idr_init(&s->conn_idr);
  564. s->idr_in_use = 0;
  565. s->rcvbuf_cache = kmem_cache_create(s->name, s->max_rcvbuf_size,
  566. 0, SLAB_HWCACHE_ALIGN, NULL);
  567. if (!s->rcvbuf_cache)
  568. return -ENOMEM;
  569. ret = tipc_work_start(s);
  570. if (ret < 0) {
  571. kmem_cache_destroy(s->rcvbuf_cache);
  572. return ret;
  573. }
  574. ret = tipc_open_listening_sock(s);
  575. if (ret < 0) {
  576. tipc_work_stop(s);
  577. kmem_cache_destroy(s->rcvbuf_cache);
  578. return ret;
  579. }
  580. return ret;
  581. }
/* Shut down the server: close every live connection, then tear down
 * the workqueues, the receive-buffer cache and the connection idr.
 * The idr lock is dropped around tipc_close_conn() because closing may
 * drop the final reference, and tipc_conn_kref_release() re-takes the
 * lock to unregister the conid.
 */
void tipc_server_stop(struct tipc_server *s)
{
	struct tipc_conn *con;
	int id;

	spin_lock_bh(&s->idr_lock);
	/* Loop until idr_in_use hits zero; ids may be sparse */
	for (id = 0; s->idr_in_use; id++) {
		con = idr_find(&s->conn_idr, id);
		if (con) {
			spin_unlock_bh(&s->idr_lock);
			tipc_close_conn(con);
			spin_lock_bh(&s->idr_lock);
		}
	}
	spin_unlock_bh(&s->idr_lock);

	tipc_work_stop(s);
	kmem_cache_destroy(s->rcvbuf_cache);
	idr_destroy(&s->conn_idr);
}