/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>

#define FL_MIN_LINGER	6	/* Minimal linger. It is set to 6s, as
				 * specified in the old IPv6 RFC. Well,
				 * it was a reasonable value.
				 */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)

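/* Labels are hashed on their low 8 bits (in host byte order), giving
 * FL_HASH_MASK+1 == 256 buckets for at most FL_MAX_SIZE entries. */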
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: it protects only the GC */
static DEFINE_SPINLOCK(ip6_fl_lock);

/* Protects the per-socket flowlabel lists */
static DEFINE_SPINLOCK(ip6_sk_fl_lock);

#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))

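/* The traversal macros above run under rcu_read_lock_bh(); updaters
 * serialize on ip6_fl_lock (hash table) or ip6_sk_fl_lock (socket lists). */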
static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}

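/* Grab a reference under RCU.  atomic_inc_not_zero() refuses a label whose
 * refcount already dropped to zero: such an entry belongs to the GC and
 * must not be resurrected here. */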
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}

static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		release_net(fl->fl_net);
		kfree(fl->opt);
		kfree_rcu(fl, rcu);
	}
}

static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}

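/* Timer-driven garbage collector: walk every hash chain, unlink labels
 * with no users whose lifetime has expired, and re-arm the timer for the
 * earliest remaining deadline. */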
static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched)
		mod_timer(&ip6_fl_gc_timer, sched);
	spin_unlock(&ip6_fl_lock);
}

static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock(&ip6_fl_lock);
}

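/* Insert @fl into the hash table.  Returns NULL on success; if @label is
 * already taken, returns the existing entry instead (with its refcount
 * raised) so the caller can recheck sharing permissions against it. */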
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
		 * We dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck for it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock.
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}

/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);

void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */

/*
 * This is the only difficult place: a flowlabel enforces equal headers
 * up to and including the routing header, but the user may still supply
 * options following the rthdr.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (fopt == NULL || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt != NULL) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);

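/* Convert a user-supplied lifetime in seconds to jiffies: values below
 * FL_MIN_LINGER are rounded up, and values above FL_MAX_LINGER are
 * refused (returning 0) unless the caller has CAP_NET_ADMIN. */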
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}

static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
					    &junk, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}

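/* Enforce the label accounting limits: the global table may hold at most
 * FL_MAX_SIZE labels, and an unprivileged socket at most FL_MAX_PER_SOCK.
 * CAP_NET_ADMIN bypasses the per-socket and memory-pressure checks, but
 * never the hard global limit. */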
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}

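/* getsockopt(IPV6_FLOWLABEL_MGR): report either the most recently
 * received label (IPV6_FL_F_REMOTE), the reflected label, or the full
 * state of the socket's label matching its current flow_label. */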
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;
			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}

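/*
 * setsockopt(IPV6_FLOWLABEL_MGR) is the userspace entry point for the
 * manager below.  A minimal sketch of acquiring a fresh label, assuming
 * "fd" is an IPv6 socket and "dst" holds the peer address (both names
 * hypothetical; the field values are illustrative only):
 *
 *	struct in6_flowlabel_req freq = {
 *		.flr_dst     = dst,
 *		.flr_label   = 0,		// 0: let the kernel pick one
 *		.flr_action  = IPV6_FL_A_GET,
 *		.flr_share   = IPV6_FL_S_ANY,
 *		.flr_flags   = IPV6_FL_F_CREATE,
 *		.flr_expires = 30,		// seconds
 *		.flr_linger  = 6,		// seconds
 *	};
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *
 * On success with flr_label == 0 the kernel copies the label it chose
 * back into freq.flr_label (see the copy_to_user() near the end of the
 * IPV6_FL_A_GET case below).
 */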
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference(*sflp)) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = rcu_dereference(sfl->next);
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			struct net *net = sock_net(sk);
			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (fl == NULL)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (fl1 == NULL)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;
				err = -ENOMEM;
				if (sfl1 == NULL)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (sfl1 == NULL || (err = mem_check(sk)) != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1 != NULL)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}

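/* /proc/net/ip6_flowlabel: one line per label -- value, share mode,
 * owner (pid or uid, 0 otherwise), user count, linger and expiry in
 * seconds, destination address, and length of the attached options. */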
#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
	else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}

static const struct seq_operations ip6fl_seq_ops = {
	.start	= ip6fl_seq_start,
	.next	= ip6fl_seq_next,
	.stop	= ip6fl_seq_stop,
	.show	= ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct ip6fl_iter_state *state;
	int err;

	err = seq_open_net(inode, file, &ip6fl_seq_ops,
			   sizeof(struct ip6fl_iter_state));

	if (!err) {
		seq = file->private_data;
		state = ip6fl_seq_private(seq);
		rcu_read_lock();
		state->pid_ns = get_pid_ns(task_active_pid_ns(current));
		rcu_read_unlock();
	}
	return err;
}

static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	put_pid_ns(state->pid_ns);
	return seq_release_net(inode, file);
}

static const struct file_operations ip6fl_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= ip6fl_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= ip6fl_seq_release,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
			 &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}