/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>

#define FL_MIN_LINGER	6	/* Minimal linger. It is set to the 6 seconds
				   specified in the old IPv6 RFC. Well, it was
				   a reasonable value. */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Protects the per-socket flowlabel lists */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}
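
/* Look up a label in the global hash and take a reference.  NULL is
 * returned if the label does not exist or is already being destroyed
 * (its refcount has dropped to zero).
 */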
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}

static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		release_net(fl->fl_net);
		kfree(fl->opt);
		kfree_rcu(fl, rcu);
	}
}
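
/* Drop one reference.  When the last user goes away, push the expiry out
 * to at least lastuse + linger and make sure the GC timer fires by then.
 */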
static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}
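
/* Garbage collector: walk the hash table, free expired labels that no
 * longer have users, and re-arm the timer for the earliest remaining
 * expiry.
 */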
static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	spin_unlock(&ip6_fl_lock);
}
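
/* Called on netns teardown: drop every unused flowlabel that belongs to
 * the dying namespace, regardless of its expiry time.
 */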
static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock(&ip6_fl_lock);
}
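
/* Insert a freshly created flowlabel into the global hash.  With label == 0
 * a free label is picked at random; if a non-zero label already exists, a
 * reference to the existing entry is returned instead.  NULL means the new
 * entry was installed.
 */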
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}

/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);
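
/* Release every flowlabel still attached to the socket; called when the
 * socket is destroyed.
 */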
void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */

/*
   This is the only difficult place. The flowlabel enforces equal headers
   up to and including the routing header, but the user may supply options
   following the rthdr.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (fopt == NULL || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt != NULL) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
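
/* Convert a linger/expiry value given in seconds to jiffies.  Values below
 * FL_MIN_LINGER are raised to the minimum; values above FL_MAX_LINGER are
 * allowed only with CAP_NET_ADMIN, otherwise 0 is returned to reject them.
 */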
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}
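
/* Build a new flowlabel from an IPV6_FLOWLABEL_MGR request: parse any
 * ancillary data appended to the request into per-label options, validate
 * linger/expiry and the destination address, and record the share mode
 * together with its owner (pid or uid).
 */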
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
					    &junk, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}
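
/* Enforce the global FL_MAX_SIZE and per-socket FL_MAX_PER_SOCK limits.
 * CAP_NET_ADMIN is exempt from the soft limits, but not from a full table.
 */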
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}
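
/* getsockopt(IPV6_FLOWLABEL_MGR): report the flow label of received
 * traffic (IPV6_FL_F_REMOTE), the reflected label, or the attributes of
 * the label currently attached to the socket.
 */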
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;
			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}
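
/* setsockopt(IPV6_FLOWLABEL_MGR): detach (PUT), renew, or get/create a
 * flowlabel for this socket, honouring the requested share mode and the
 * IPV6_FL_F_CREATE/EXCL/REFLECT flags.
 */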
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference(*sflp)) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = rcu_dereference(sfl->next);
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			struct net *net = sock_net(sk);
			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (fl == NULL)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (fl1 == NULL)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -ENOMEM;
				if (sfl1 == NULL)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (sfl1 == NULL || (err = mem_check(sk)) != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1 != NULL)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}
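
/* /proc/net/ip6_flowlabel: one line per flowlabel with its share mode,
 * owner, user count, linger, expiry, destination address and option length.
 */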
#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
	else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}

static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct ip6fl_iter_state *state;
	int err;

	err = seq_open_net(inode, file, &ip6fl_seq_ops,
			   sizeof(struct ip6fl_iter_state));

	if (!err) {
		seq = file->private_data;
		state = ip6fl_seq_private(seq);
		rcu_read_lock();
		state->pid_ns = get_pid_ns(task_active_pid_ns(current));
		rcu_read_unlock();
	}
	return err;
}

static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	put_pid_ns(state->pid_ns);
	return seq_release_net(inode, file);
}

static const struct file_operations ip6fl_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	ip6fl_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	ip6fl_seq_release,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
			 &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}