/*
 * File: socket.c
 *
 * Phonet sockets
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
 *          Rémi Denis-Courmont
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
  25. #include <linux/gfp.h>
  26. #include <linux/kernel.h>
  27. #include <linux/net.h>
  28. #include <linux/poll.h>
  29. #include <linux/sched/signal.h>
  30. #include <net/sock.h>
  31. #include <net/tcp_states.h>
  32. #include <linux/phonet.h>
  33. #include <linux/export.h>
  34. #include <net/phonet/phonet.h>
  35. #include <net/phonet/pep.h>
  36. #include <net/phonet/pn_dev.h>
  37. static int pn_socket_release(struct socket *sock)
  38. {
  39. struct sock *sk = sock->sk;
  40. if (sk) {
  41. sock->sk = NULL;
  42. sk->sk_prot->close(sk, 0);
  43. }
  44. return 0;
  45. }
/* Bound sockets are hashed by the low bits of their object (port). */
#define PN_HASHSIZE 16
#define PN_HASHMASK (PN_HASHSIZE-1)

/* Global table of bound Phonet sockets. Writers take pnsocks.lock;
 * readers traverse the chains under RCU. */
static struct {
	struct hlist_head hlist[PN_HASHSIZE];
	struct mutex lock;
} pnsocks;
  52. void __init pn_sock_init(void)
  53. {
  54. unsigned int i;
  55. for (i = 0; i < PN_HASHSIZE; i++)
  56. INIT_HLIST_HEAD(pnsocks.hlist + i);
  57. mutex_init(&pnsocks.lock);
  58. }
  59. static struct hlist_head *pn_hash_list(u16 obj)
  60. {
  61. return pnsocks.hlist + (obj & PN_HASHMASK);
  62. }
/*
 * Find address based on socket address, match only certain fields.
 * Also grab sock if it was found. Remember to sock_put it later.
 */
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
{
	struct sock *sknode;
	struct sock *rval = NULL;
	u16 obj = pn_sockaddr_get_object(spn);
	u8 res = spn->spn_resource;
	struct hlist_head *hlist = pn_hash_list(obj);

	/* Lockless lookup: the hash chains are updated with RCU-safe
	 * primitives (see pn_sock_hash/pn_sock_unhash). */
	rcu_read_lock();
	sk_for_each_rcu(sknode, hlist) {
		struct pn_sock *pn = pn_sk(sknode);

		BUG_ON(!pn->sobject); /* unbound socket */
		if (!net_eq(sock_net(sknode), net))
			continue;
		if (pn_port(obj)) {
			/* Look up socket by port */
			if (pn_port(pn->sobject) != pn_port(obj))
				continue;
		} else {
			/* If port is zero, look up by resource */
			if (pn->resource != res)
				continue;
		}
		/* A socket bound to a specific address only matches that
		 * address; a zero (wildcard) bound address matches any. */
		if (pn_addr(pn->sobject) &&
		    pn_addr(pn->sobject) != pn_addr(obj))
			continue;

		rval = sknode;
		/* Grab a reference inside the RCU section so the caller
		 * can use the sock after we unlock; caller must sock_put(). */
		sock_hold(sknode);
		break;
	}
	rcu_read_unlock();

	return rval;
}
  99. /* Deliver a broadcast packet (only in bottom-half) */
  100. void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
  101. {
  102. struct hlist_head *hlist = pnsocks.hlist;
  103. unsigned int h;
  104. rcu_read_lock();
  105. for (h = 0; h < PN_HASHSIZE; h++) {
  106. struct sock *sknode;
  107. sk_for_each(sknode, hlist) {
  108. struct sk_buff *clone;
  109. if (!net_eq(sock_net(sknode), net))
  110. continue;
  111. if (!sock_flag(sknode, SOCK_BROADCAST))
  112. continue;
  113. clone = skb_clone(skb, GFP_ATOMIC);
  114. if (clone) {
  115. sock_hold(sknode);
  116. sk_receive_skb(sknode, clone, 0);
  117. }
  118. }
  119. hlist++;
  120. }
  121. rcu_read_unlock();
  122. }
  123. int pn_sock_hash(struct sock *sk)
  124. {
  125. struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
  126. mutex_lock(&pnsocks.lock);
  127. sk_add_node_rcu(sk, hlist);
  128. mutex_unlock(&pnsocks.lock);
  129. return 0;
  130. }
  131. EXPORT_SYMBOL(pn_sock_hash);
/* Remove a socket from the bound-socket hash and drop all its resource
 * bindings, then wait for an RCU grace period so that concurrent lockless
 * lookups (pn_find_sock_by_sa/pn_find_sock_by_res) have finished with it. */
void pn_sock_unhash(struct sock *sk)
{
	mutex_lock(&pnsocks.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&pnsocks.lock);
	pn_sock_unbind_all_res(sk);
	synchronize_rcu();
}
EXPORT_SYMBOL(pn_sock_unhash);
  141. static DEFINE_MUTEX(port_mutex);
/* Bind a Phonet socket to a local address and port.
 * Returns 0 on success; -EINVAL on short address or rebind attempt,
 * -EAFNOSUPPORT for a non-Phonet family, -EADDRNOTAVAIL when the source
 * address is not local, or an error from get_port()/hash(). */
static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	int err;
	u16 handle;
	u8 saddr;

	/* A protocol (e.g. PEP) may provide its own bind implementation. */
	if (sk->sk_prot->bind)
		return sk->sk_prot->bind(sk, addr, len);

	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
	saddr = pn_addr(handle);
	/* Non-zero source addresses must be configured locally;
	 * phonet_address_lookup() returns 0 when the address is ours. */
	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
		return -EADDRNOTAVAIL;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
		err = -EINVAL; /* attempt to rebind */
		goto out;
	}
	WARN_ON(sk_hashed(sk));
	/* Lock order: socket lock first, then port_mutex. */
	mutex_lock(&port_mutex);
	err = sk->sk_prot->get_port(sk, pn_port(handle));
	if (err)
		goto out_port;

	/* get_port() sets the port, bind() sets the address if applicable */
	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
	pn->resource = spn->spn_resource;

	/* Enable RX on the socket */
	err = sk->sk_prot->hash(sk);
out_port:
	mutex_unlock(&port_mutex);
out:
	release_sock(sk);
	return err;
}
  181. static int pn_socket_autobind(struct socket *sock)
  182. {
  183. struct sockaddr_pn sa;
  184. int err;
  185. memset(&sa, 0, sizeof(sa));
  186. sa.spn_family = AF_PHONET;
  187. err = pn_socket_bind(sock, (struct sockaddr *)&sa,
  188. sizeof(struct sockaddr_pn));
  189. if (err != -EINVAL)
  190. return err;
  191. BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
  192. return 0; /* socket was already bound */
  193. }
/* Connect a Phonet (pipe) socket to a remote object, waiting for the
 * protocol handshake to complete unless the socket is non-blocking
 * (in which case -EINPROGRESS is returned while the handshake runs). */
static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
		int len, int flags)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	struct task_struct *tsk = current;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	int err;

	/* A local port is required before connecting. */
	if (pn_socket_autobind(sock))
		return -ENOBUFS;
	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	lock_sock(sk);

	switch (sock->state) {
	case SS_UNCONNECTED:
		if (sk->sk_state != TCP_CLOSE) {
			err = -EISCONN;
			goto out;
		}
		break;
	case SS_CONNECTING:
		err = -EALREADY;
		goto out;
	default:
		err = -EISCONN;
		goto out;
	}

	pn->dobject = pn_sockaddr_get_object(spn);
	pn->resource = pn_sockaddr_get_resource(spn);
	sock->state = SS_CONNECTING;

	err = sk->sk_prot->connect(sk, addr, len);
	if (err) {
		/* Roll back the tentative connection state. */
		sock->state = SS_UNCONNECTED;
		pn->dobject = 0;
		goto out;
	}

	/* Wait for the handshake; drop the socket lock while sleeping so
	 * the RX path can advance sk_state. */
	while (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EINPROGRESS;
			goto out;
		}
		if (signal_pending(tsk)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
						TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}

	/* Translate the final protocol state into a connect() result. */
	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
		err = 0;
	else if (sk->sk_state == TCP_CLOSE_WAIT)
		err = -ECONNRESET;
	else
		err = -ECONNREFUSED;
	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
	release_sock(sk);
	return err;
}
  261. static int pn_socket_accept(struct socket *sock, struct socket *newsock,
  262. int flags, bool kern)
  263. {
  264. struct sock *sk = sock->sk;
  265. struct sock *newsk;
  266. int err;
  267. if (unlikely(sk->sk_state != TCP_LISTEN))
  268. return -EINVAL;
  269. newsk = sk->sk_prot->accept(sk, flags, &err, kern);
  270. if (!newsk)
  271. return err;
  272. lock_sock(newsk);
  273. sock_graft(newsk, newsock);
  274. newsock->state = SS_CONNECTED;
  275. release_sock(newsk);
  276. return 0;
  277. }
  278. static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
  279. int peer)
  280. {
  281. struct sock *sk = sock->sk;
  282. struct pn_sock *pn = pn_sk(sk);
  283. memset(addr, 0, sizeof(struct sockaddr_pn));
  284. addr->sa_family = AF_PHONET;
  285. if (!peer) /* Race with bind() here is userland's problem. */
  286. pn_sockaddr_set_object((struct sockaddr_pn *)addr,
  287. pn->sobject);
  288. return sizeof(struct sockaddr_pn);
  289. }
/* Poll mask for connection-oriented Phonet sockets.
 * NOTE(review): pep_sk() is applied unconditionally, so this assumes the
 * sock really is a PEP socket (it is only wired into phonet_stream_ops). */
static __poll_t pn_socket_poll_mask(struct socket *sock, __poll_t events)
{
	struct sock *sk = sock->sk;
	struct pep_sock *pn = pep_sk(sk);
	__poll_t mask = 0;

	if (sk->sk_state == TCP_CLOSE)
		return EPOLLERR;
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	/* Queued pipe control requests show up as "urgent" data. */
	if (!skb_queue_empty(&pn->ctrlreq_queue))
		mask |= EPOLLPRI;
	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
		return EPOLLHUP;

	/* Writable only when established, with send-buffer space left and
	 * at least one flow-control credit available. */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
	    atomic_read(&pn->tx_credits))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}
/* Socket-level ioctl. SIOCPNGETOBJECT rewrites a user-supplied handle into
 * a full object (local device address + this socket's bound port); all
 * other commands are delegated to the protocol's ioctl handler. */
static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	if (cmd == SIOCPNGETOBJECT) {
		struct net_device *dev;
		u16 handle;
		u8 saddr;

		if (get_user(handle, (__u16 __user *)arg))
			return -EFAULT;

		lock_sock(sk);
		/* Prefer the device the socket is bound to, otherwise any
		 * Phonet device in this namespace. */
		if (sk->sk_bound_dev_if)
			dev = dev_get_by_index(sock_net(sk),
						sk->sk_bound_dev_if);
		else
			dev = phonet_device_get(sock_net(sk));
		if (dev && (dev->flags & IFF_UP))
			saddr = phonet_address_get(dev, pn_addr(handle));
		else
			saddr = PN_NO_ADDR;
		release_sock(sk);

		/* Both dev_get_by_index() and phonet_device_get() took a
		 * device reference; drop it before returning. */
		if (dev)
			dev_put(dev);
		if (saddr == PN_NO_ADDR)
			return -EHOSTUNREACH;

		handle = pn_object(saddr, pn_port(pn->sobject));
		return put_user(handle, (__u16 __user *)arg);
	}

	return sk->sk_prot->ioctl(sk, cmd, arg);
}
  340. static int pn_socket_listen(struct socket *sock, int backlog)
  341. {
  342. struct sock *sk = sock->sk;
  343. int err = 0;
  344. if (pn_socket_autobind(sock))
  345. return -ENOBUFS;
  346. lock_sock(sk);
  347. if (sock->state != SS_UNCONNECTED) {
  348. err = -EINVAL;
  349. goto out;
  350. }
  351. if (sk->sk_state != TCP_LISTEN) {
  352. sk->sk_state = TCP_LISTEN;
  353. sk->sk_ack_backlog = 0;
  354. }
  355. sk->sk_max_ack_backlog = backlog;
  356. out:
  357. release_sock(sk);
  358. return err;
  359. }
  360. static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
  361. size_t total_len)
  362. {
  363. struct sock *sk = sock->sk;
  364. if (pn_socket_autobind(sock))
  365. return -EAGAIN;
  366. return sk->sk_prot->sendmsg(sk, m, total_len);
  367. }
/* proto_ops for connectionless (datagram) Phonet sockets: no connect,
 * listen or accept; socket options are rejected. */
const struct proto_ops phonet_dgram_ops = {
	.family = AF_PHONET,
	.owner = THIS_MODULE,
	.release = pn_socket_release,
	.bind = pn_socket_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = pn_socket_getname,
	.poll_mask = datagram_poll_mask,
	.ioctl = pn_socket_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = sock_no_setsockopt,
	.compat_getsockopt = sock_no_getsockopt,
#endif
	.sendmsg = pn_socket_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
/* proto_ops for connection-oriented Phonet (pipe) sockets: full
 * connect/accept/listen support and common socket options. */
const struct proto_ops phonet_stream_ops = {
	.family = AF_PHONET,
	.owner = THIS_MODULE,
	.release = pn_socket_release,
	.bind = pn_socket_bind,
	.connect = pn_socket_connect,
	.socketpair = sock_no_socketpair,
	.accept = pn_socket_accept,
	.getname = pn_socket_getname,
	.poll_mask = pn_socket_poll_mask,
	.ioctl = pn_socket_ioctl,
	.listen = pn_socket_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_common_setsockopt,
	.getsockopt = sock_common_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
	.sendmsg = pn_socket_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
EXPORT_SYMBOL(phonet_stream_ops);
/* allocate port for a socket */
/* Pick a local port: the requested "sport", or, when sport is zero, the
 * next free port from the configured ephemeral range (round-robin rover).
 * Must be called with port_mutex held. Returns 0 or -EADDRINUSE. */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
	static int port_cur;	/* rover: last ephemeral port handed out */
	struct net *net = sock_net(sk);
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn try_sa;
	struct sock *tmpsk;

	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
	try_sa.spn_family = AF_PHONET;
	WARN_ON(!mutex_is_locked(&port_mutex));
	if (!sport) {
		/* search free port */
		int port, pmin, pmax;

		phonet_get_local_port_range(&pmin, &pmax);
		for (port = pmin; port <= pmax; port++) {
			/* Advance the rover, wrapping into [pmin, pmax]. */
			port_cur++;
			if (port_cur < pmin || port_cur > pmax)
				port_cur = pmin;

			pn_sockaddr_set_port(&try_sa, port_cur);
			tmpsk = pn_find_sock_by_sa(net, &try_sa);
			if (tmpsk == NULL) {
				sport = port_cur;
				goto found;
			} else
				/* In use: drop the lookup reference. */
				sock_put(tmpsk);
		}
	} else {
		/* try to find specific port */
		pn_sockaddr_set_port(&try_sa, sport);
		tmpsk = pn_find_sock_by_sa(net, &try_sa);
		if (tmpsk == NULL)
			/* No sock there! We can use that port... */
			goto found;
		else
			sock_put(tmpsk);
	}
	/* the port must be in use already */
	return -EADDRINUSE;

found:
	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
	return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);
  461. #ifdef CONFIG_PROC_FS
/* Return the pos-th bound socket belonging to this namespace, or NULL.
 * Runs under rcu_read_lock() (taken in pn_sock_seq_start). */
static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	struct hlist_head *hlist = pnsocks.hlist;
	struct sock *sknode;
	unsigned int h;

	for (h = 0; h < PN_HASHSIZE; h++) {
		sk_for_each_rcu(sknode, hlist) {
			if (!net_eq(net, sock_net(sknode)))
				continue;
			if (!pos)
				return sknode;
			pos--;
		}
		hlist++;
	}
	return NULL;
}
  480. static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
  481. {
  482. struct net *net = seq_file_net(seq);
  483. do
  484. sk = sk_next(sk);
  485. while (sk && !net_eq(net, sock_net(sk)));
  486. return sk;
  487. }
/* seq_file start: enter the RCU read section for the whole iteration. */
static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
  494. static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  495. {
  496. struct sock *sk;
  497. if (v == SEQ_START_TOKEN)
  498. sk = pn_sock_get_idx(seq, 0);
  499. else
  500. sk = pn_sock_get_next(seq, v);
  501. (*pos)++;
  502. return sk;
  503. }
/* seq_file stop: leave the RCU section taken in pn_sock_seq_start(). */
static void pn_sock_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}
/* Emit one row of the socket listing: a column header for the start
 * token, otherwise one socket's state, padded to a fixed width. */
static int pn_sock_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "pt loc rem rs st tx_queue rx_queue "
			" uid inode ref pointer drops");
	else {
		struct sock *sk = v;
		struct pn_sock *pn = pn_sk(sk);

		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
			"%d %pK %d",
			sk->sk_protocol, pn->sobject, pn->dobject,
			pn->resource, sk->sk_state,
			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
			sock_i_ino(sk),
			refcount_read(&sk->sk_refcnt), sk,
			atomic_read(&sk->sk_drops));
	}
	seq_pad(seq, '\n');
	return 0;
}
/* seq_file operations for the bound-socket listing. */
const struct seq_operations pn_sock_seq_ops = {
	.start = pn_sock_seq_start,
	.next = pn_sock_seq_next,
	.stop = pn_sock_seq_stop,
	.show = pn_sock_seq_show,
};
  537. #endif
/* Resource registry: maps each of the 256 Phonet resource IDs to at most
 * one socket (init_net only); writers hold resource_mutex, readers use RCU. */
static struct {
	struct sock *sk[256];
} pnres;
  541. /*
  542. * Find and hold socket based on resource.
  543. */
  544. struct sock *pn_find_sock_by_res(struct net *net, u8 res)
  545. {
  546. struct sock *sk;
  547. if (!net_eq(net, &init_net))
  548. return NULL;
  549. rcu_read_lock();
  550. sk = rcu_dereference(pnres.sk[res]);
  551. if (sk)
  552. sock_hold(sk);
  553. rcu_read_unlock();
  554. return sk;
  555. }
  556. static DEFINE_MUTEX(resource_mutex);
/* Claim resource ID "res" for socket "sk" (init_net only, CAP_SYS_ADMIN
 * required). Returns 0, or -EADDRINUSE when the slot is already taken. */
int pn_sock_bind_res(struct sock *sk, u8 res)
{
	int ret = -EADDRINUSE;

	if (!net_eq(sock_net(sk), &init_net))
		return -ENOIOCTLCMD;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (pn_socket_autobind(sk->sk_socket))
		return -EAGAIN;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == NULL) {
		/* Hold a reference for as long as the table publishes sk. */
		sock_hold(sk);
		rcu_assign_pointer(pnres.sk[res], sk);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);
	return ret;
}
/* Release resource ID "res" if it is bound to "sk" (CAP_SYS_ADMIN
 * required). Waits an RCU grace period before dropping the table's
 * socket reference so concurrent lockless lookups remain safe. */
int pn_sock_unbind_res(struct sock *sk, u8 res)
{
	int ret = -ENOENT;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == sk) {
		RCU_INIT_POINTER(pnres.sk[res], NULL);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);

	if (ret == 0) {
		synchronize_rcu();
		sock_put(sk);
	}
	return ret;
}
/* Drop every resource binding owned by "sk" (used by pn_sock_unhash).
 * NOTE(review): __sock_put() drops the table's references without
 * triggering destruction; the caller performs synchronize_rcu() and the
 * final sock_put() afterwards, per the comment below. */
void pn_sock_unbind_all_res(struct sock *sk)
{
	unsigned int res, match = 0;

	mutex_lock(&resource_mutex);
	for (res = 0; res < 256; res++) {
		if (pnres.sk[res] == sk) {
			RCU_INIT_POINTER(pnres.sk[res], NULL);
			match++;
		}
	}
	mutex_unlock(&resource_mutex);

	while (match > 0) {
		__sock_put(sk);
		match--;
	}
	/* Caller is responsible for RCU sync before final sock_put() */
}
  609. #ifdef CONFIG_PROC_FS
  610. static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
  611. {
  612. struct net *net = seq_file_net(seq);
  613. unsigned int i;
  614. if (!net_eq(net, &init_net))
  615. return NULL;
  616. for (i = 0; i < 256; i++) {
  617. if (pnres.sk[i] == NULL)
  618. continue;
  619. if (!pos)
  620. return pnres.sk + i;
  621. pos--;
  622. }
  623. return NULL;
  624. }
  625. static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
  626. {
  627. struct net *net = seq_file_net(seq);
  628. unsigned int i;
  629. BUG_ON(!net_eq(net, &init_net));
  630. for (i = (sk - pnres.sk) + 1; i < 256; i++)
  631. if (pnres.sk[i])
  632. return pnres.sk + i;
  633. return NULL;
  634. }
/* seq_file start: hold resource_mutex across the whole iteration. */
static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(resource_mutex)
{
	mutex_lock(&resource_mutex);
	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
  641. static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  642. {
  643. struct sock **sk;
  644. if (v == SEQ_START_TOKEN)
  645. sk = pn_res_get_idx(seq, 0);
  646. else
  647. sk = pn_res_get_next(seq, v);
  648. (*pos)++;
  649. return sk;
  650. }
/* seq_file stop: release the mutex taken in pn_res_seq_start(). */
static void pn_res_seq_stop(struct seq_file *seq, void *v)
	__releases(resource_mutex)
{
	mutex_unlock(&resource_mutex);
}
/* Emit one row of the resource listing: a header for the start token,
 * otherwise one bound resource, padded to a fixed width. */
static int pn_res_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 63);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "rs uid inode");
	else {
		struct sock **psk = v;
		struct sock *sk = *psk;

		/* The slot's index in pnres.sk is the resource ID. */
		seq_printf(seq, "%02X %5u %lu",
			(int) (psk - pnres.sk),
			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
			sock_i_ino(sk));
	}
	seq_pad(seq, '\n');
	return 0;
}
/* seq_file operations for the resource-binding listing. */
const struct seq_operations pn_res_seq_ops = {
	.start = pn_res_seq_start,
	.next = pn_res_seq_next,
	.stop = pn_res_seq_stop,
	.show = pn_res_seq_show,
};
  678. #endif