proto.c

/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>

#include <net/checksum.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/semaphore.h>

#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

static struct net_protocol dccp_protocol = {
        .handler     = dccp_v4_rcv,
        .err_handler = dccp_v4_err,
};

const char *dccp_packet_name(const int type)
{
        static const char *dccp_packet_names[] = {
                [DCCP_PKT_REQUEST]  = "REQUEST",
                [DCCP_PKT_RESPONSE] = "RESPONSE",
                [DCCP_PKT_DATA]     = "DATA",
                [DCCP_PKT_ACK]      = "ACK",
                [DCCP_PKT_DATAACK]  = "DATAACK",
                [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
                [DCCP_PKT_CLOSE]    = "CLOSE",
                [DCCP_PKT_RESET]    = "RESET",
                [DCCP_PKT_SYNC]     = "SYNC",
                [DCCP_PKT_SYNCACK]  = "SYNCACK",
        };

        if (type >= DCCP_NR_PKT_TYPES)
                return "INVALID";
        else
                return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
        static char *dccp_state_names[] = {
                [DCCP_OPEN]       = "OPEN",
                [DCCP_REQUESTING] = "REQUESTING",
                [DCCP_PARTOPEN]   = "PARTOPEN",
                [DCCP_LISTEN]     = "LISTEN",
                [DCCP_RESPOND]    = "RESPOND",
                [DCCP_CLOSING]    = "CLOSING",
                [DCCP_TIME_WAIT]  = "TIME_WAIT",
                [DCCP_CLOSED]     = "CLOSED",
        };

        if (state >= DCCP_MAX_STATES)
                return "INVALID STATE!";
        else
                return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

static inline int dccp_listen_start(struct sock *sk)
{
        dccp_sk(sk)->dccps_role = DCCP_ROLE_LISTEN;
        return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
}

int dccp_disconnect(struct sock *sk, int flags)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;

        if (old_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        /* ABORT function of RFC793 */
        if (old_state == DCCP_LISTEN) {
                inet_csk_listen_stop(sk);
                /* FIXME: do the active reset thing */
        } else if (old_state == DCCP_REQUESTING)
                sk->sk_err = ECONNRESET;

        dccp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);

        if (sk->sk_send_head != NULL) {
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        inet->dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);
        icsk->icsk_backoff = 0;
        inet_csk_delack_init(sk);
        __sk_dst_reset(sk);

        BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return err;
}

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        dccp_pr_debug("entry\n");
        return -ENOIOCTLCMD;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int optlen)
{
        dccp_pr_debug("entry\n");

        if (level != SOL_DCCP)
                return ip_setsockopt(sk, level, optname, optval, optlen);

        return -EOPNOTSUPP;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        dccp_pr_debug("entry\n");

        if (level != SOL_DCCP)
                return ip_getsockopt(sk, level, optname, optval, optlen);

        return -EOPNOTSUPP;
}

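/*
 * Queue exactly one packet: messages larger than the cached MSS are rejected
 * with -EMSGSIZE, the call waits (if allowed) for the handshake to complete,
 * and the user data is copied into a single skb that is handed to
 * dccp_write_xmit().
 */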
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
        const int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int rc, size;
        long timeo;

        if (len > dp->dccps_mss_cache)
                return -EMSGSIZE;

        lock_sock(sk);
        timeo = sock_sndtimeo(sk, noblock);

        /*
         * We have to use sk_stream_wait_connect here to set sk_write_pending,
         * so that the trick in dccp_rcv_request_sent_state_process works.
         */
        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
                if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_release;

        size = sk->sk_prot->max_header + len;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);

        if (skb == NULL)
                goto out_release;

        skb_reserve(skb, sk->sk_prot->max_header);
        rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (rc != 0)
                goto out_discard;

        rc = dccp_write_xmit(sk, skb, len);
out_release:
        release_sock(sk);
        return rc ? : len;
out_discard:
        kfree_skb(skb);
        goto out_release;
}

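/*
 * Receive loop: only DATA and DATAACK packets carry payload for the caller;
 * RESET and CLOSE terminate the read with a return value of 0, anything else
 * is eaten and skipped. Data that does not fit into the supplied buffer is
 * truncated and MSG_TRUNC is set.
 */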
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len, int nonblock, int flags, int *addr_len)
{
        const struct dccp_hdr *dh;
        long timeo;

        lock_sock(sk);

        if (sk->sk_state == DCCP_LISTEN) {
                len = -ENOTCONN;
                goto out;
        }

        timeo = sock_rcvtimeo(sk, nonblock);

        do {
                struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

                if (skb == NULL)
                        goto verify_sock_status;

                dh = dccp_hdr(skb);

                if (dh->dccph_type == DCCP_PKT_DATA ||
                    dh->dccph_type == DCCP_PKT_DATAACK)
                        goto found_ok_skb;

                if (dh->dccph_type == DCCP_PKT_RESET ||
                    dh->dccph_type == DCCP_PKT_CLOSE) {
                        dccp_pr_debug("found fin ok!\n");
                        len = 0;
                        goto found_fin_ok;
                }
                dccp_pr_debug("packet_type=%s\n",
                              dccp_packet_name(dh->dccph_type));
                sk_eat_skb(sk, skb);
verify_sock_status:
                if (sock_flag(sk, SOCK_DONE)) {
                        len = 0;
                        break;
                }

                if (sk->sk_err) {
                        len = sock_error(sk);
                        break;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        len = 0;
                        break;
                }

                if (sk->sk_state == DCCP_CLOSED) {
                        if (!sock_flag(sk, SOCK_DONE)) {
                                /* This occurs when user tries to read
                                 * from never connected socket.
                                 */
                                len = -ENOTCONN;
                                break;
                        }
                        len = 0;
                        break;
                }

                if (!timeo) {
                        len = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        len = sock_intr_errno(timeo);
                        break;
                }

                sk_wait_data(sk, &timeo);
                continue;
found_ok_skb:
                if (len > skb->len)
                        len = skb->len;
                else if (len < skb->len)
                        msg->msg_flags |= MSG_TRUNC;

                if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
                        /* Exception. Bailout! */
                        len = -EFAULT;
                        break;
                }
found_fin_ok:
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                break;
        } while (1);
out:
        release_sock(sk);
        return len;
}

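/*
 * listen() entry point for DCCP sockets: only valid on an unconnected
 * SOCK_DCCP socket that is CLOSED or already LISTENing; in the latter case
 * only the backlog is adjusted.
 */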
static int inet_dccp_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;

        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != DCCP_LISTEN) {
                /*
                 * FIXME: here it probably should be sk->sk_prot->listen_start
                 * see tcp_listen_start
                 */
                err = dccp_listen_start(sk);
                if (err)
                        goto out;
        }
        sk->sk_max_ack_backlog = backlog;
        err = 0;

out:
        release_sock(sk);
        return err;
}

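/*
 * For each current socket state, dccp_new_state gives the state to enter on
 * close(); the DCCP_ACTION_FIN bit marks the transitions for which a
 * CLOSE/CLOSEREQ must also be sent to the peer.
 */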
static const unsigned char dccp_new_state[] = {
        /* current state:   new state:                      action: */
        [0]               = DCCP_CLOSED,
        [DCCP_OPEN]       = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_REQUESTING] = DCCP_CLOSED,
        [DCCP_PARTOPEN]   = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_LISTEN]     = DCCP_CLOSED,
        [DCCP_RESPOND]    = DCCP_CLOSED,
        [DCCP_CLOSING]    = DCCP_CLOSED,
        [DCCP_TIME_WAIT]  = DCCP_CLOSED,
        [DCCP_CLOSED]     = DCCP_CLOSED,
};

static int dccp_close_state(struct sock *sk)
{
        const int next = dccp_new_state[sk->sk_state];
        const int ns = next & DCCP_STATE_MASK;

        if (ns != sk->sk_state)
                dccp_set_state(sk, ns);

        return next & DCCP_ACTION_FIN;
}

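/*
 * close() a DCCP socket: flush the receive queue, send a CLOSE/CLOSEREQ if
 * the state transition requires one, orphan the socket and, unless it is
 * already CLOSED, leave the rest of the teardown to the retransmission timer
 * and to protocol input processing.
 */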
void dccp_close(struct sock *sk, long timeout)
{
        struct sk_buff *skb;

        lock_sock(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_set_state(sk, DCCP_CLOSED);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        /*
         * We need to flush the recv. buffs.  We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        /* FIXME: check for unread data */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                __kfree_skb(skb);
        }

        if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (dccp_close_state(sk)) {
                dccp_send_close(sk, 1);
        }

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        /*
         * It is the last release_sock in its life. It will remove backlog.
         */
        release_sock(sk);
        /*
         * Now socket is owned by kernel and we acquire BH lock
         * to finish close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        BUG_TRAP(!sock_owned_by_user(sk));

        sock_hold(sk);
        sock_orphan(sk);

        /*
         * The last release_sock may have processed the CLOSE or RESET
         * packet moving sock to CLOSED state, if not we have to fire
         * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
         * in draft-ietf-dccp-spec-11. -acme
         */
        if (sk->sk_state == DCCP_CLOSING) {
                /* FIXME: should start at 2 * RTT */
                /* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          inet_csk(sk)->icsk_rto,
                                          DCCP_RTO_MAX);
#if 0
                /* Yeah, we should use sk->sk_prot->orphan_count, etc */
                dccp_set_state(sk, DCCP_CLOSED);
#endif
        }

        atomic_inc(sk->sk_prot->orphan_count);
        if (sk->sk_state == DCCP_CLOSED)
                inet_csk_destroy_sock(sk);

        /* Otherwise, socket is reprieved until protocol close. */

        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}

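/* Not implemented yet: dccp_shutdown() currently only logs that it was called. */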
void dccp_shutdown(struct sock *sk, int how)
{
        dccp_pr_debug("entry\n");
}

static struct proto_ops inet_dccp_ops = {
        .family     = PF_INET,
        .owner      = THIS_MODULE,
        .release    = inet_release,
        .bind       = inet_bind,
        .connect    = inet_stream_connect,
        .socketpair = sock_no_socketpair,
        .accept     = inet_accept,
        .getname    = inet_getname,
        .poll       = sock_no_poll,
        .ioctl      = inet_ioctl,
        /* FIXME: work on inet_listen to rename it to sock_common_listen */
        .listen     = inet_dccp_listen,
        .shutdown   = inet_shutdown,
        .setsockopt = sock_common_setsockopt,
        .getsockopt = sock_common_getsockopt,
        .sendmsg    = inet_sendmsg,
        .recvmsg    = sock_common_recvmsg,
        .mmap       = sock_no_mmap,
        .sendpage   = sock_no_sendpage,
};

extern struct net_proto_family inet_family_ops;

static struct inet_protosw dccp_v4_protosw = {
        .type       = SOCK_DCCP,
        .protocol   = IPPROTO_DCCP,
        .prot       = &dccp_v4_prot,
        .ops        = &inet_dccp_ops,
        .capability = -1,
        .no_check   = 0,
        .flags      = 0,
};

/*
 * This is the global socket data structure used for responding to
 * the Out-of-the-blue (OOTB) packets. A control sock will be created
 * for this socket at initialization time.
 */
struct socket *dccp_ctl_socket;

static char dccp_ctl_socket_err_msg[] __initdata =
        KERN_ERR "DCCP: Failed to create the control socket.\n";

static int __init dccp_ctl_sock_init(void)
{
        int rc = sock_create_kern(PF_INET, SOCK_DCCP, IPPROTO_DCCP,
                                  &dccp_ctl_socket);
        if (rc < 0)
                printk(dccp_ctl_socket_err_msg);
        else {
                dccp_ctl_socket->sk->sk_allocation = GFP_ATOMIC;
                inet_sk(dccp_ctl_socket->sk)->uc_ttl = -1;

                /* Unhash it so that IP input processing does not even
                 * see it, we do not wish this socket to see incoming
                 * packets.
                 */
                dccp_ctl_socket->sk->sk_prot->unhash(dccp_ctl_socket->sk);
        }

        return rc;
}

#ifdef CONFIG_IP_DCCP_UNLOAD_HACK
void dccp_ctl_sock_exit(void)
{
        if (dccp_ctl_socket != NULL) {
                sock_release(dccp_ctl_socket);
                dccp_ctl_socket = NULL;
        }
}

EXPORT_SYMBOL_GPL(dccp_ctl_sock_exit);
#endif

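/*
 * Allocate the two per-CPU copies of the DCCP MIB counters declared with
 * DEFINE_SNMP_STAT above; both must succeed or neither is kept.
 */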
static int __init init_dccp_v4_mibs(void)
{
        int rc = -ENOMEM;

        dccp_statistics[0] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[0] == NULL)
                goto out;

        dccp_statistics[1] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[1] == NULL)
                goto out_free_one;

        rc = 0;
out:
        return rc;
out_free_one:
        free_percpu(dccp_statistics[0]);
        dccp_statistics[0] = NULL;
        goto out;
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
#endif

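/*
 * Module init: register the DCCP proto, size and allocate the established
 * and bind hash tables (scaled by available memory, much like the TCP ones),
 * hook the protocol into IPv4 and create the control socket. Every step
 * unwinds on failure.
 */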
static int __init dccp_init(void)
{
        unsigned long goal;
        int ehash_order, bhash_order, i;
        int rc = proto_register(&dccp_v4_prot, 1);

        if (rc)
                goto out;

        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out_proto_unregister;

        /*
         * Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
        if (num_physpages >= (128 * 1024))
                goal = num_physpages >> (21 - PAGE_SHIFT);
        else
                goal = num_physpages >> (23 - PAGE_SHIFT);

        if (thash_entries)
                goal = (thash_entries *
                        sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;

        for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
                ;
        do {
                dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
                                           sizeof(struct inet_ehash_bucket);
                dccp_hashinfo.ehash_size >>= 1;
                while (dccp_hashinfo.ehash_size &
                       (dccp_hashinfo.ehash_size - 1))
                        dccp_hashinfo.ehash_size--;
                dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
                        __get_free_pages(GFP_ATOMIC, ehash_order);
        } while (!dccp_hashinfo.ehash && --ehash_order > 0);

        if (!dccp_hashinfo.ehash) {
                printk(KERN_CRIT "Failed to allocate DCCP "
                                 "established hash table\n");
                goto out_free_bind_bucket_cachep;
        }

        for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
                rwlock_init(&dccp_hashinfo.ehash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
        }

        bhash_order = ehash_order;

        do {
                dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
                                           sizeof(struct inet_bind_hashbucket);
                if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
                    bhash_order > 0)
                        continue;
                dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC, bhash_order);
        } while (!dccp_hashinfo.bhash && --bhash_order >= 0);

        if (!dccp_hashinfo.bhash) {
                printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
                goto out_free_dccp_ehash;
        }

        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
        }

        if (init_dccp_v4_mibs())
                goto out_free_dccp_bhash;

        rc = -EAGAIN;
        if (inet_add_protocol(&dccp_protocol, IPPROTO_DCCP))
                goto out_free_dccp_v4_mibs;

        inet_register_protosw(&dccp_v4_protosw);

        rc = dccp_ctl_sock_init();
        if (rc)
                goto out_unregister_protosw;
out:
        return rc;
out_unregister_protosw:
        inet_unregister_protosw(&dccp_v4_protosw);
        inet_del_protocol(&dccp_protocol, IPPROTO_DCCP);
out_free_dccp_v4_mibs:
        free_percpu(dccp_statistics[0]);
        free_percpu(dccp_statistics[1]);
        dccp_statistics[0] = dccp_statistics[1] = NULL;
out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
        dccp_hashinfo.bhash = NULL;
out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
        dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_hashinfo.bind_bucket_cachep = NULL;
out_proto_unregister:
        proto_unregister(&dccp_v4_prot);
        goto out;
}

static const char dccp_del_proto_err_msg[] __exitdata =
        KERN_ERR "can't remove dccp net_protocol\n";

static void __exit dccp_fini(void)
{
        inet_unregister_protosw(&dccp_v4_protosw);

        if (inet_del_protocol(&dccp_protocol, IPPROTO_DCCP) < 0)
                printk(dccp_del_proto_err_msg);

        free_percpu(dccp_statistics[0]);
        free_percpu(dccp_statistics[1]);
        free_pages((unsigned long)dccp_hashinfo.bhash,
                   get_order(dccp_hashinfo.bhash_size *
                             sizeof(struct inet_bind_hashbucket)));
        free_pages((unsigned long)dccp_hashinfo.ehash,
                   get_order(dccp_hashinfo.ehash_size *
                             sizeof(struct inet_ehash_bucket)));
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        proto_unregister(&dccp_v4_prot);
}

module_init(dccp_init);
module_exit(dccp_fini);

/*
 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP
 * (33) values directly. Also cover the case where the protocol is not
 * specified, i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6");
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");