/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
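
/* Hash for the established table: mixes both endpoints with a random
 * secret initialized on first use (net_get_random_once) plus a per-netns
 * component, so chain layout is not predictable from the addresses alone.
 */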
static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
			const __u16 lport, const __be32 faddr,
			const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->port = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		tb->num_owners = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}
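
/* Tie @sk to the bind bucket @tb for local port @snum; the caller must
 * hold the bucket's lock. The back-pointer is kept in icsk_bind_hash so
 * the port can be released later via inet_put_port().
 */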
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	tb->num_owners++;
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	tb->num_owners--;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}
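
/* BH-safe wrapper: the bind bucket lock is taken with plain spin_lock()
 * above, so BHs must be disabled around it.
 */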
void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);
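
/* Make a child socket (e.g. one created on accept) share the listener's
 * local port binding. Normally the listener's own bind bucket is reused;
 * see the tproxy note below for the exception.
 */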
int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (unlikely(!tb)) {
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);
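
/* Score a candidate listener for an incoming (daddr, hnum, dif) tuple:
 * -1 means no match at all; a specifically bound address or device scores
 * higher than a wildcard, and a socket on the current CPU gets a small
 * bonus. The highest score wins in __inet_lookup_listener().
 */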
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif)
{
	int score = -1;
	struct inet_sock *inet = inet_sk(sk);

	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
			!ipv6_only_sock(sk)) {
		__be32 rcv_saddr = inet->inet_rcv_saddr;
		score = sk->sk_family == PF_INET ? 2 : 1;
		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 4;
		}
		if (sk->sk_incoming_cpu == raw_smp_processor_id())
			score++;
	}
	return score;
}

/*
 * Don't inline this cruft. Here are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	int score, hiscore, matches = 0, reuseport = 0;
	u32 phash = 0;

	rcu_read_lock();
begin:
	result = NULL;
	hiscore = 0;
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif);
		if (score > hiscore) {
			result = sk;
			hiscore = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				matches = 1;
			}
		} else if (score == hiscore && reuseport) {
			matches++;
			if (reciprocal_scale(phash, matches) == 0)
				result = sk;
			phash = next_pseudo_random32(phash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
		goto begin;
	if (result) {
		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, daddr,
				  dif) < hiscore)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!atomic_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);
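
/* Destructor used by early demux: drops the reference stashed in skb->sk,
 * whatever flavour of socket it points to.
 */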
void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);
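
/* Lockless (RCU) lookup in the established hash. A socket can move to
 * another chain while we walk, which is why the nulls end-marker is
 * checked and the lookup restarted on a mismatch, and why the match is
 * re-verified after taking a reference.
 */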
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports, dif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	rcu_read_unlock();
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET_MATCH(sk2, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see a socket
	 * with a funny identity in the hash table.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}
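
/* Starting offset into the ephemeral port space, derived cryptographically
 * from the connection's addresses and destination port so that different
 * destinations probe ports in unrelated orders.
 */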
static u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIME_WAIT socket).
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	struct inet_ehash_bucket *head;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	}
	if (ret)
		__sk_nulls_add_node_rcu(sk, list);
	spin_unlock(lock);
	return ret;
}
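
/* Insert a non-listening socket into ehash and bump the protocol's inuse
 * counter. If @osk was supposed to be replaced but had already vanished,
 * the new socket loses the race and is destroyed instead.
 */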
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
{
	bool ok = inet_ehash_insert(sk, osk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		percpu_counter_inc(sk->sk_prot->orphan_count);
		sk->sk_state = TCP_CLOSE;
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
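
/* Hash a socket into the appropriate table: listening sockets go into the
 * listening hash under the bucket lock, everything else into ehash.
 * Called with BHs disabled.
 */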
void __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;

	if (sk->sk_state != TCP_LISTEN) {
		inet_ehash_nolisten(sk, osk);
		return;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	__sk_nulls_add_node_rcu(sk, &ilb->head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	spin_unlock(&ilb->lock);
}
EXPORT_SYMBOL(__inet_hash);

void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk, NULL);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);
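
/* Remove a socket from whichever hash table it is in, taking the matching
 * bucket lock, and credit the inuse counter back if it was really hashed.
 */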
void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	int done;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN)
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
	else
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock_bh(lock);
	done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);
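
/* Bind-and-hash for an outgoing connection. If the socket already has a
 * local port, only uniqueness of the 4-tuple must be checked; otherwise
 * walk the ephemeral port range, starting at a per-destination offset
 * rounded down to even, until a port passes @check_established, possibly
 * recycling a TIME_WAIT socket that occupies it.
 */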
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->inet_num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		/* By starting with offset being an even number,
		 * we tend to leave about 50% of ports for other uses,
		 * like bind(0).
		 */
		offset &= ~1;

		local_bh_disable();
		for (i = 0; i < remaining; i++) {
			port = low + (i + offset) % remaining;
			if (inet_is_local_reserved_port(net, port))
				continue;
			head = &hinfo->bhash[inet_bhashfn(net, port,
					hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, &head->chain) {
				if (net_eq(ib_net(tb), net) &&
				    tb->port == port) {
					if (tb->fastreuse >= 0 ||
					    tb->fastreuseport >= 0)
						goto next_port;
					WARN_ON(hlist_empty(&tb->owners));
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			tb->fastreuseport = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += (i + 2) & ~1;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->inet_sport = htons(port);
			inet_ehash_nolisten(sk, (struct sock *)tw);
		}
		if (tw)
			inet_twsk_bind_unhash(tw, hinfo);
		spin_unlock(&head->lock);

		if (tw)
			inet_twsk_deschedule_put(tw);

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	tb  = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		inet_ehash_nolisten(sk, NULL);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
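
/* inet_hash_connect() below is what the TCP connect path uses to grab an
 * ephemeral source port. A simplified sketch of a typical call site
 * (modeled on tcp_v4_connect(); routing and error handling elided):
 *
 *	err = inet_hash_connect(&tcp_death_row, sk);
 *	if (err)
 *		goto failure;
 *
 * where tcp_death_row is TCP's global inet_timewait_death_row.
 */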
/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u32 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);
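
/* One-time setup of the listening hash: init each bucket lock and give
 * every nulls list a distinct end marker so RCU lookups can detect when
 * they have wandered onto another chain.
 */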
void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
				      i + LISTENING_NULLS_BASE);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
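
/* Size the ehash lock array: roughly two cache lines worth of spinlocks
 * per possible CPU, rounded up to a power of two and capped at one lock
 * per hash bucket. sizeof(spinlock_t) can be 0 on UP builds without
 * spinlock debugging, hence the locksz check.
 */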
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
						      GFP_KERNEL | __GFP_NOWARN);
		if (!hashinfo->ehash_locks)
			hashinfo->ehash_locks = vmalloc(nblocks * locksz);

		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);