/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>

static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};
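
/* Return true if the rule carries no match conditions at all, i.e. it
 * would match every packet.  Lets callers recognize default catch-all
 * rules.
 */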
bool fib_rule_matchall(const struct fib_rule *rule)
{
	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
	    rule->flags)
		return false;
	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
		return false;
	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);
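
/* Allocate and append a default rule with action FR_ACT_TO_TBL
 * pointing at @table.  Used by protocols at init time to install
 * their initial rules.
 */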
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	refcount_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = ops->fro_net;

	r->uid_range = fib_kuid_range_unset;
	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
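
/* Default preference for a new rule: one below the preference of the
 * second rule in the list, or 0 if there is no such rule.
 */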
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
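
/* Look up the fib_rules_ops registered for @family in @net and take a
 * reference on its owning module.  Returns NULL if the family is not
 * registered or the module is going away.
 */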
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}

static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}

static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}
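
/* Test a single rule against a flow.  The generic selectors (iif, oif,
 * fwmark, tunnel id, l3mdev, uid range) are checked here, then the
 * family-specific ops->match() decides.  FIB_RULE_INVERT flips the
 * result.
 */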
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
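
/* Walk the rule list under RCU and act on the first matching rule:
 * FR_ACT_GOTO jumps to its resolved target, FR_ACT_NOP is skipped,
 * anything else runs ops->action().  An action returning -EAGAIN
 * falls through to the next rule.  On success the matching rule is
 * returned in arg->rule, with a reference held unless the caller
 * passed FIB_LOOKUP_NOREF.
 */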
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(refcount_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
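
/* Return 1 if a rule equivalent to @rule already exists: all generic
 * selectors match and the family-specific ops->compare() agrees.
 * Backs the NLM_F_EXCL check in fib_nl_newrule().
 */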
static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
		       struct nlattr **tb, struct fib_rule *rule)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->action != rule->action)
			continue;

		if (r->table != rule->table)
			continue;

		if (r->pref != rule->pref)
			continue;

		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (r->mark != rule->mark)
			continue;

		if (r->mark_mask != rule->mark_mask)
			continue;

		if (r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (r->l3mdev != rule->l3mdev)
			continue;

		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		    !uid_eq(r->uid_range.end, rule->uid_range.end))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;

		return 1;
	}
	return 0;
}
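
/* RTM_NEWRULE handler: parse and validate the request, build the new
 * rule, insert it in preference order, resolve any goto rules waiting
 * for this preference, and notify listeners.
 */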
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	refcount_set(&rule->refcnt, 1);
	rule->fr_net = net;

	rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
				      : fib_default_rule_pref(ops);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	err = -EINVAL;
	if (tb[FRA_L3MDEV]) {
#ifdef CONFIG_NET_L3_MASTER_DEV
		rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
		if (rule->l3mdev != 1)
#endif
			goto errout_free;
	}

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	if (rule->l3mdev && rule->table)
		goto errout_free;

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			goto errout_free;
		}

		rule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&rule->uid_range) ||
		    !uid_lte(rule->uid_range.start, rule->uid_range.end))
			goto errout_free;
	} else {
		rule->uid_range = fib_kuid_range_unset;
	}

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_exists(ops, frh, tb, rule)) {
		err = -EEXIST;
		goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);
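
/* RTM_DELRULE handler: delete the first rule matching every attribute
 * present in the request, repoint or unresolve goto rules targeting
 * it, and notify listeners.
 */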
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r;
	struct nlattr *tb[FRA_MAX+1];
	struct fib_kuid_range range;
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	if (tb[FRA_UID_RANGE]) {
		range = nla_get_kuid_range(tb);
		if (!uid_range_set(&range)) {
			err = -EINVAL;
			goto errout;
		}
	} else {
		range = fib_kuid_range_unset;
	}

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_TUN_ID] &&
		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
			continue;

		if (tb[FRA_L3MDEV] &&
		    (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
			continue;

		if (uid_range_set(&range) &&
		    (!uid_eq(rule->uid_range.start, range.start) ||
		     !uid_eq(rule->uid_range.end, range.end)))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		if (ops->delete) {
			err = ops->delete(rule);
			if (err)
				goto errout;
		}

		if (rule->tun_id)
			ip_tunnel_unneed_metadata();

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if this rule is a target of any goto rule.  If
		 * so, redirect those rules to the next rule with the
		 * same preference, or mark them unresolved.  As this
		 * walk is potentially expensive, it is only performed
		 * when goto rules other than the one being deleted
		 * actually exist.
		 */
		if (ops->nr_goto_rules > 0) {
			struct fib_rule *n;

			n = list_next_entry(rule, list);
			if (&n->list == &ops->rules_list || n->pref != rule->pref)
				n = NULL;
			list_for_each_entry(r, &ops->rules_list, list) {
				if (rtnl_dereference(r->ctarget) != rule)
					continue;
				rcu_assign_pointer(r->ctarget, n);
				if (!n)
					ops->unresolved_rules++;
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_delrule);

static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
			 + nla_total_size(sizeof(struct fib_kuid_range));

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
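
/* Fill a netlink message with @rule: write the fib_rule_hdr, emit the
 * generic FRA_* attributes, then let ops->fill() append the
 * family-specific ones.
 */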
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}
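
/* RTM_GETRULE dump handler.  A family-specific request dumps a single
 * rule list; AF_UNSPEC iterates over every registered family, with
 * cb->args[0]/args[1] tracking the resume position across messages.
 */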
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
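
/* Multicast an RTM_NEWRULE/RTM_DELRULE notification to the ops'
 * netlink group; on allocation or fill failure the error is reported
 * via rtnl_set_sk_err() instead.
 */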
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}
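
/* Resolve rules that reference @dev by name to its ifindex.  The
 * counterpart below marks them detached (-1) again when the device
 * goes away.
 */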
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}
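
/* Netdevice notifier: keep the iif/oif bindings of every rule list in
 * the device's netns in sync as devices register, unregister or
 * change name.
 */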
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);