ipv6.h

/*
 * Linux INET6 implementation
 *
 * Authors:
 * Pedro Roque <roque@di.fc.ul.pt>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _NET_IPV6_H
#define _NET_IPV6_H

#include <linux/ipv6.h>
#include <linux/hardirq.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/snmp.h>

#define SIN6_LEN_RFC2133 24

#define IPV6_MAXPLEN 65535

/*
 * NextHeader field of IPv6 header
 */

#define NEXTHDR_HOP 0 /* Hop-by-hop option header. */
#define NEXTHDR_TCP 6 /* TCP segment. */
#define NEXTHDR_UDP 17 /* UDP message. */
#define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */
#define NEXTHDR_ROUTING 43 /* Routing header. */
#define NEXTHDR_FRAGMENT 44 /* Fragmentation/reassembly header. */
#define NEXTHDR_GRE 47 /* GRE header. */
#define NEXTHDR_ESP 50 /* Encapsulating security payload. */
#define NEXTHDR_AUTH 51 /* Authentication header. */
#define NEXTHDR_ICMP 58 /* ICMP for IPv6. */
#define NEXTHDR_NONE 59 /* No next header */
#define NEXTHDR_DEST 60 /* Destination options header. */
#define NEXTHDR_SCTP 132 /* SCTP message. */
#define NEXTHDR_MOBILITY 135 /* Mobility header. */

#define NEXTHDR_MAX 255

#define IPV6_DEFAULT_HOPLIMIT 64
#define IPV6_DEFAULT_MCASTHOPS 1

/* Limits on Hop-by-Hop and Destination options.
 *
 * Per RFC8200 there is no limit on the maximum number or lengths of options in
 * Hop-by-Hop or Destination options other than that the packet must fit in an
 * MTU. We allow configurable limits in order to mitigate potential denial of
 * service attacks.
 *
 * There are three limits that may be set:
 *   - Limit the number of options in a Hop-by-Hop or Destination options
 *     extension header
 *   - Limit the byte length of a Hop-by-Hop or Destination options extension
 *     header
 *   - Disallow unknown options
 *
 * The limits are expressed in corresponding sysctls:
 *
 * ipv6.sysctl.max_dst_opts_cnt
 * ipv6.sysctl.max_hbh_opts_cnt
 * ipv6.sysctl.max_dst_opts_len
 * ipv6.sysctl.max_hbh_opts_len
 *
 * max_*_opts_cnt is the number of TLVs that are allowed for Destination
 * options or Hop-by-Hop options. If the number is less than zero then unknown
 * TLVs are disallowed and the number of known options that are allowed is the
 * absolute value. Setting the value to INT_MAX indicates no limit.
 *
 * max_*_opts_len is the length limit in bytes of a Destination or
 * Hop-by-Hop options extension header. Setting the value to INT_MAX
 * indicates no length limit.
 *
 * If a limit is exceeded when processing an extension header the packet is
 * silently discarded.
 */

/* Default limits for Hop-by-Hop and Destination options */
#define IP6_DEFAULT_MAX_DST_OPTS_CNT 8
#define IP6_DEFAULT_MAX_HBH_OPTS_CNT 8
#define IP6_DEFAULT_MAX_DST_OPTS_LEN INT_MAX /* No limit */
#define IP6_DEFAULT_MAX_HBH_OPTS_LEN INT_MAX /* No limit */
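
/* Worked example of the max_*_opts_cnt convention described above (the
 * values are illustrative, not recommendations): with max_dst_opts_cnt = 8,
 * up to eight Destination option TLVs, known or unknown, are accepted; with
 * max_dst_opts_cnt = -8, unknown TLVs cause the packet to be discarded and
 * at most eight known TLVs are accepted; INT_MAX removes the count limit
 * entirely.
 */
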
/*
 * Addr type
 *
 * type    - unicast | multicast
 * scope   - local | site | global
 * v4      - compat
 * v4mapped
 * any
 * loopback
 */

#define IPV6_ADDR_ANY 0x0000U
#define IPV6_ADDR_UNICAST 0x0001U
#define IPV6_ADDR_MULTICAST 0x0002U
#define IPV6_ADDR_LOOPBACK 0x0010U
#define IPV6_ADDR_LINKLOCAL 0x0020U
#define IPV6_ADDR_SITELOCAL 0x0040U
#define IPV6_ADDR_COMPATv4 0x0080U
#define IPV6_ADDR_SCOPE_MASK 0x00f0U
#define IPV6_ADDR_MAPPED 0x1000U

/*
 * Addr scopes
 */
#define IPV6_ADDR_MC_SCOPE(a) \
        ((a)->s6_addr[1] & 0x0f)        /* nonstandard */
#define __IPV6_ADDR_SCOPE_INVALID -1
#define IPV6_ADDR_SCOPE_NODELOCAL 0x01
#define IPV6_ADDR_SCOPE_LINKLOCAL 0x02
#define IPV6_ADDR_SCOPE_SITELOCAL 0x05
#define IPV6_ADDR_SCOPE_ORGLOCAL 0x08
#define IPV6_ADDR_SCOPE_GLOBAL 0x0e

/*
 * Addr flags
 */
#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \
        ((a)->s6_addr[1] & 0x10)
#define IPV6_ADDR_MC_FLAG_PREFIX(a) \
        ((a)->s6_addr[1] & 0x20)
#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \
        ((a)->s6_addr[1] & 0x40)

/*
 * fragmentation header
 */
struct frag_hdr {
        __u8 nexthdr;
        __u8 reserved;
        __be16 frag_off;
        __be32 identification;
};

#define IP6_MF 0x0001
#define IP6_OFFSET 0xFFF8
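
/* In struct frag_hdr above, frag_off carries the fragment offset in units of
 * eight octets in its upper 13 bits and the "more fragments" flag in its low
 * bit, so the two masks above separate them directly. Illustrative sketch
 * (fh is a hypothetical struct frag_hdr pointer):
 *
 *      unsigned int offset = ntohs(fh->frag_off) & IP6_OFFSET;
 *      bool more_frags     = ntohs(fh->frag_off) & IP6_MF;
 *
 * where "offset" comes out directly in bytes because the 8-octet unit value
 * is stored shifted left by three bits.
 */
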
#define IP6_REPLY_MARK(net, mark) \
        ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)

#include <net/sock.h>

/* sysctls */
extern int sysctl_mld_max_msf;
extern int sysctl_mld_qrv;

#define _DEVINC(net, statname, mod, idev, field) \
({ \
        struct inet6_dev *_idev = (idev); \
        if (likely(_idev != NULL)) \
                mod##SNMP_INC_STATS64((_idev)->stats.statname, (field)); \
        mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field)); \
})

/* per device counters are atomic_long_t */
#define _DEVINCATOMIC(net, statname, mod, idev, field) \
({ \
        struct inet6_dev *_idev = (idev); \
        if (likely(_idev != NULL)) \
                SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
        mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field)); \
})

/* per device and per net counters are atomic_long_t */
#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \
({ \
        struct inet6_dev *_idev = (idev); \
        if (likely(_idev != NULL)) \
                SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
        SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field)); \
})

#define _DEVADD(net, statname, mod, idev, field, val) \
({ \
        struct inet6_dev *_idev = (idev); \
        if (likely(_idev != NULL)) \
                mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
        mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val)); \
})

#define _DEVUPD(net, statname, mod, idev, field, val) \
({ \
        struct inet6_dev *_idev = (idev); \
        if (likely(_idev != NULL)) \
                mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
        mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val)); \
})

/* MIBs */

#define IP6_INC_STATS(net, idev, field) \
        _DEVINC(net, ipv6, , idev, field)
#define __IP6_INC_STATS(net, idev, field) \
        _DEVINC(net, ipv6, __, idev, field)
#define IP6_ADD_STATS(net, idev, field, val) \
        _DEVADD(net, ipv6, , idev, field, val)
#define __IP6_ADD_STATS(net, idev, field, val) \
        _DEVADD(net, ipv6, __, idev, field, val)
#define IP6_UPD_PO_STATS(net, idev, field, val) \
        _DEVUPD(net, ipv6, , idev, field, val)
#define __IP6_UPD_PO_STATS(net, idev, field, val) \
        _DEVUPD(net, ipv6, __, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field) \
        _DEVINCATOMIC(net, icmpv6, , idev, field)
#define __ICMP6_INC_STATS(net, idev, field) \
        _DEVINCATOMIC(net, icmpv6, __, idev, field)

#define ICMP6MSGOUT_INC_STATS(net, idev, field) \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field + 256)
#define ICMP6MSGIN_INC_STATS(net, idev, field) \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)

struct ip6_ra_chain {
        struct ip6_ra_chain *next;
        struct sock *sk;
        int sel;
        void (*destructor)(struct sock *);
};

extern struct ip6_ra_chain *ip6_ra_chain;
extern rwlock_t ip6_ra_lock;

/*
 * This structure is prepared by the protocol when parsing ancillary data
 * and is passed to IPv6.
 */

struct ipv6_txoptions {
        refcount_t refcnt;
        /* Length of this structure */
        int tot_len;

        /* length of extension headers */
        __u16 opt_flen;  /* after fragment hdr */
        __u16 opt_nflen; /* before fragment hdr */

        struct ipv6_opt_hdr *hopopt;
        struct ipv6_opt_hdr *dst0opt;
        struct ipv6_rt_hdr *srcrt; /* Routing Header */
        struct ipv6_opt_hdr *dst1opt;
        struct rcu_head rcu;
        /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
};

struct ip6_flowlabel {
        struct ip6_flowlabel __rcu *next;
        __be32 label;
        atomic_t users;
        struct in6_addr dst;
        struct ipv6_txoptions *opt;
        unsigned long linger;
        struct rcu_head rcu;
        u8 share;
        union {
                struct pid *pid;
                kuid_t uid;
        } owner;
        unsigned long lastuse;
        unsigned long expires;
        struct net *fl_net;
};

#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
#define IPV6_FLOWLABEL_STATELESS_FLAG cpu_to_be32(0x00080000)
#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20
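
/* These masks operate on the first 32-bit word of the IPv6 header (see
 * ip6_flow_hdr() below). Viewed as a host-order value, that word is 4 bits
 * of version, 8 bits of traffic class (hence IPV6_TCLASS_SHIFT of 20) and
 * 20 bits of flow label, so IPV6_TCLASS_MASK is simply the flowinfo mask
 * with the flow label bits cleared. The masks themselves are built with
 * cpu_to_be32() so they can be applied to the header word without
 * byte-swapping it first.
 */
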
struct ipv6_fl_socklist {
        struct ipv6_fl_socklist __rcu *next;
        struct ip6_flowlabel *fl;
        struct rcu_head rcu;
};

struct ipcm6_cookie {
        __s16 hlimit;
        __s16 tclass;
        __s8 dontfrag;
        struct ipv6_txoptions *opt;
};

static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
        struct ipv6_txoptions *opt;

        rcu_read_lock();
        opt = rcu_dereference(np->opt);
        if (opt) {
                if (!refcount_inc_not_zero(&opt->refcnt))
                        opt = NULL;
                else
                        opt = rcu_pointer_handoff(opt);
        }
        rcu_read_unlock();
        return opt;
}

static inline void txopt_put(struct ipv6_txoptions *opt)
{
        if (opt && refcount_dec_and_test(&opt->refcnt))
                kfree_rcu(opt, rcu);
}
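
/* txopt_get() takes a reference on the RCU-managed np->opt inside an RCU
 * read-side critical section and hands the pointer out via
 * rcu_pointer_handoff(), so every successful call must be paired with
 * txopt_put(), which frees the options through kfree_rcu() once the last
 * reference is dropped. A minimal usage sketch (np is a hypothetical
 * struct ipv6_pinfo pointer; the returned pointer may be NULL):
 *
 *      struct ipv6_txoptions *opt = txopt_get(np);
 *      ...
 *      txopt_put(opt);
 */
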
struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
                                         struct ip6_flowlabel *fl,
                                         struct ipv6_txoptions *fopt);
void fl6_free_socklist(struct sock *sk);
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
                           int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);

static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
        if (fl)
                atomic_dec(&fl->users);
}

void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);

void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                                struct icmp6hdr *thdr, int len);

int ip6_ra_control(struct sock *sk, int sel);

int ipv6_parse_hopopts(struct sk_buff *skb);

struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
                                        struct ipv6_txoptions *opt);
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
                                          struct ipv6_txoptions *opt,
                                          int newtype,
                                          struct ipv6_opt_hdr __user *newopt,
                                          int newoptlen);
struct ipv6_txoptions *
ipv6_renew_options_kern(struct sock *sk,
                        struct ipv6_txoptions *opt,
                        int newtype,
                        struct ipv6_opt_hdr *newopt,
                        int newoptlen);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt);

bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
                       const struct inet6_skb_parm *opt);
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
                                           struct ipv6_txoptions *opt);

static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
        /* If forwarding is enabled, RAs are not accepted unless the special
         * hybrid mode (accept_ra=2) is enabled.
         */
        return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
            idev->cnf.accept_ra;
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int ip6_frag_mem(struct net *net)
{
        return sum_frag_mem_limit(&net->ipv6.frags);
}
#endif

#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
#define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
#define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */

int __ipv6_addr_type(const struct in6_addr *addr);
static inline int ipv6_addr_type(const struct in6_addr *addr)
{
        return __ipv6_addr_type(addr) & 0xffff;
}

static inline int ipv6_addr_scope(const struct in6_addr *addr)
{
        return __ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK;
}

static inline int __ipv6_addr_src_scope(int type)
{
        return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16);
}

static inline int ipv6_addr_src_scope(const struct in6_addr *addr)
{
        return __ipv6_addr_src_scope(__ipv6_addr_type(addr));
}

static inline bool __ipv6_addr_needs_scope_id(int type)
{
        return type & IPV6_ADDR_LINKLOCAL ||
               (type & IPV6_ADDR_MULTICAST &&
                (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)));
}

static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface)
{
        return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0;
}

static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
{
        return memcmp(a1, a2, sizeof(struct in6_addr));
}

static inline bool
ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
                     const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ulm = (const unsigned long *)m;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
                  ((ul1[1] ^ ul2[1]) & ulm[1]));
#else
        return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
                  ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
                  ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
                  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
#endif
}

static inline void ipv6_addr_prefix(struct in6_addr *pfx,
                                    const struct in6_addr *addr,
                                    int plen)
{
        /* caller must guarantee 0 <= plen <= 128 */
        int o = plen >> 3,
            b = plen & 0x7;

        memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr));
        memcpy(pfx->s6_addr, addr, o);
        if (b != 0)
                pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
}
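
/* The (0xff00 >> b) expression above builds the mask for the partial byte of
 * the prefix: for a /61 prefix, o = 7 and b = 5, so 0xff00 >> 5 = 0x7f8,
 * which truncates to 0xf8 when stored in the byte and keeps only the five
 * most significant bits of the eighth byte. ipv6_addr_prefix_copy() below
 * uses the same mask to splice a prefix into an existing address.
 */
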
static inline void ipv6_addr_prefix_copy(struct in6_addr *addr,
                                         const struct in6_addr *pfx,
                                         int plen)
{
        /* caller must guarantee 0 <= plen <= 128 */
        int o = plen >> 3,
            b = plen & 0x7;

        memcpy(addr->s6_addr, pfx, o);
        if (b != 0) {
                addr->s6_addr[o] &= ~(0xff00 >> b);
                addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b));
        }
}

static inline void __ipv6_addr_set_half(__be32 *addr,
                                        __be32 wh, __be32 wl)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#if defined(__BIG_ENDIAN)
        if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) {
                *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl));
                return;
        }
#elif defined(__LITTLE_ENDIAN)
        if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) {
                *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh));
                return;
        }
#endif
#endif
        addr[0] = wh;
        addr[1] = wl;
}

static inline void ipv6_addr_set(struct in6_addr *addr,
                                 __be32 w1, __be32 w2,
                                 __be32 w3, __be32 w4)
{
        __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2);
        __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4);
}

static inline bool ipv6_addr_equal(const struct in6_addr *a1,
                                   const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
#else
        return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
                (a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
                (a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
                (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
#endif
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline bool __ipv6_prefix_equal64_half(const __be64 *a1,
                                              const __be64 *a2,
                                              unsigned int len)
{
        if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len))))
                return false;
        return true;
}

static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
                                     const struct in6_addr *addr2,
                                     unsigned int prefixlen)
{
        const __be64 *a1 = (const __be64 *)addr1;
        const __be64 *a2 = (const __be64 *)addr2;

        if (prefixlen >= 64) {
                if (a1[0] ^ a2[0])
                        return false;
                return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64);
        }
        return __ipv6_prefix_equal64_half(a1, a2, prefixlen);
}
#else
static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
                                     const struct in6_addr *addr2,
                                     unsigned int prefixlen)
{
        const __be32 *a1 = addr1->s6_addr32;
        const __be32 *a2 = addr2->s6_addr32;
        unsigned int pdw, pbi;

        /* check complete u32 in prefix */
        pdw = prefixlen >> 5;
        if (pdw && memcmp(a1, a2, pdw << 2))
                return false;

        /* check incomplete u32 in prefix */
        pbi = prefixlen & 0x1f;
        if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi))))
                return false;

        return true;
}
#endif

struct inet_frag_queue;

enum ip6_defrag_users {
        IP6_DEFRAG_LOCAL_DELIVER,
        IP6_DEFRAG_CONNTRACK_IN,
        __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
        IP6_DEFRAG_CONNTRACK_OUT,
        __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
        IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
        __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

struct ip6_create_arg {
        __be32 id;
        u32 user;
        const struct in6_addr *src;
        const struct in6_addr *dst;
        int iif;
        u8 ecn;
};

void ip6_frag_init(struct inet_frag_queue *q, const void *a);
bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);

/*
 * Equivalent of ipv4 struct ip
 */
struct frag_queue {
        struct inet_frag_queue q;

        __be32 id; /* fragment id */
        u32 user;
        struct in6_addr saddr;
        struct in6_addr daddr;

        int iif;
        unsigned int csum;
        __u16 nhoffset;
        u8 ecn;
};

void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
                           struct inet_frags *frags);

static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul = (const unsigned long *)a;

        return (ul[0] | ul[1]) == 0UL;
#else
        return (a->s6_addr32[0] | a->s6_addr32[1] |
                a->s6_addr32[2] | a->s6_addr32[3]) == 0;
#endif
}

static inline u32 ipv6_addr_hash(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul = (const unsigned long *)a;
        unsigned long x = ul[0] ^ ul[1];

        return (u32)(x ^ (x >> 32));
#else
        return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
                             a->s6_addr32[2] ^ a->s6_addr32[3]);
#endif
}

/* more secure version of ipv6_addr_hash() */
static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
        u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];

        return jhash_3words(v,
                            (__force u32)a->s6_addr32[2],
                            (__force u32)a->s6_addr32[3],
                            initval);
}

static inline bool ipv6_addr_loopback(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const __be64 *be = (const __be64 *)a;

        return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL;
#else
        return (a->s6_addr32[0] | a->s6_addr32[1] |
                a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0;
#endif
}

/*
 * Note that we must __force cast these to unsigned long to make sparse happy,
 * since all of the endian-annotated types are fixed size regardless of arch.
 */
static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
{
        return (
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
                *(unsigned long *)a |
#else
                (__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
#endif
                (__force unsigned long)(a->s6_addr32[2] ^
                                        cpu_to_be32(0x0000ffff))) == 0UL;
}

/*
 * Check for a RFC 4843 ORCHID address
 * (Overlay Routable Cryptographic Hash Identifiers)
 */
static inline bool ipv6_addr_orchid(const struct in6_addr *a)
{
        return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
}

static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
{
        return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
}

static inline void ipv6_addr_set_v4mapped(const __be32 addr,
                                          struct in6_addr *v4mapped)
{
        ipv6_addr_set(v4mapped,
                      0, 0,
                      htonl(0x0000FFFF),
                      addr);
}

/*
 * find the first different bit between two addresses
 * length of address must be a multiple of 32bits
 */
static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen)
{
        const __be32 *a1 = token1, *a2 = token2;
        int i;

        addrlen >>= 2;

        for (i = 0; i < addrlen; i++) {
                __be32 xb = a1[i] ^ a2[i];
                if (xb)
                        return i * 32 + 31 - __fls(ntohl(xb));
        }

        /*
         * we should *never* get to this point since that
         * would mean the addrs are equal
         *
         * However, we do get to it 8) And exactly, when
         * addresses are equal 8)
         *
         * ip route add 1111::/128 via ...
         * ip route add 1111::/64 via ...
         * and we are here.
         *
         * Ideally, this function should stop comparison
         * at prefix length. It does not, but it is still OK,
         * if returned value is greater than prefix length.
         *                                      --ANK (980803)
         */
        return addrlen << 5;
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen)
{
        const __be64 *a1 = token1, *a2 = token2;
        int i;

        addrlen >>= 3;

        for (i = 0; i < addrlen; i++) {
                __be64 xb = a1[i] ^ a2[i];
                if (xb)
                        return i * 64 + 63 - __fls(be64_to_cpu(xb));
        }

        return addrlen << 6;
}
#endif

static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        if (__builtin_constant_p(addrlen) && !(addrlen & 7))
                return __ipv6_addr_diff64(token1, token2, addrlen);
#endif
        return __ipv6_addr_diff32(token1, token2, addrlen);
}

static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2)
{
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
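
/* ipv6_addr_diff() returns the index, counted from the most significant bit,
 * of the first bit at which the two addresses differ, and 128 when they are
 * equal (see the comment in __ipv6_addr_diff32() above). For example,
 * 2001:db8::1 and 2001:db8::3 first differ at bit 126.
 */
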
__be32 ipv6_select_ident(struct net *net,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr);
__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);

int ip6_dst_hoplimit(struct dst_entry *dst);

static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
                                      struct dst_entry *dst)
{
        int hlimit;

        if (ipv6_addr_is_multicast(&fl6->daddr))
                hlimit = np->mcast_hops;
        else
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);
        return hlimit;
}

/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :  flow->v6addrs.src = iph->saddr;
 *                  flow->v6addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
                                            const struct ipv6hdr *iph)
{
        BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
                     offsetof(typeof(flow->addrs), v6addrs.src) +
                     sizeof(flow->addrs.v6addrs.src));
        memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
        flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}

#if IS_ENABLED(CONFIG_IPV6)

/* Sysctl settings for net ipv6.auto_flowlabels */
#define IP6_AUTO_FLOW_LABEL_OFF 0
#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
#define IP6_AUTO_FLOW_LABEL_OPTIN 2
#define IP6_AUTO_FLOW_LABEL_FORCED 3

#define IP6_AUTO_FLOW_LABEL_MAX IP6_AUTO_FLOW_LABEL_FORCED

#define IP6_DEFAULT_AUTO_FLOW_LABELS IP6_AUTO_FLOW_LABEL_OPTOUT

static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                                        __be32 flowlabel, bool autolabel,
                                        struct flowi6 *fl6)
{
        u32 hash;

        /* @flowlabel may include more than a flow label, eg, the traffic class.
         * Here we want only the flow label value.
         */
        flowlabel &= IPV6_FLOWLABEL_MASK;

        if (flowlabel ||
            net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
            (!autolabel &&
             net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
                return flowlabel;

        hash = skb_get_hash_flowi6(skb, fl6);

        /* Since this is being sent on the wire, obfuscate the hash a bit to
         * minimize the possibility that any useful information is leaked to
         * an attacker. Only the lower 20 bits are relevant.
         */
        hash = rol32(hash, 16);

        flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;

        if (net->ipv6.sysctl.flowlabel_state_ranges)
                flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;

        return flowlabel;
}

static inline int ip6_default_np_autolabel(struct net *net)
{
        switch (net->ipv6.sysctl.auto_flowlabels) {
        case IP6_AUTO_FLOW_LABEL_OFF:
        case IP6_AUTO_FLOW_LABEL_OPTIN:
        default:
                return 0;
        case IP6_AUTO_FLOW_LABEL_OPTOUT:
        case IP6_AUTO_FLOW_LABEL_FORCED:
                return 1;
        }
}
#else
static inline void ip6_set_txhash(struct sock *sk) { }
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                                        __be32 flowlabel, bool autolabel,
                                        struct flowi6 *fl6)
{
        return flowlabel;
}
static inline int ip6_default_np_autolabel(struct net *net)
{
        return 0;
}
#endif

/*
 * Header manipulation
 */
static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
                                __be32 flowlabel)
{
        *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel;
}

static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
{
        return *(__be32 *)hdr & IPV6_FLOWINFO_MASK;
}

static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr)
{
        return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK;
}

static inline u8 ip6_tclass(__be32 flowinfo)
{
        return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
}

static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
{
        return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
}

/*
 * Prototypes exported by ipv6
 */

/*
 * rcv function (called from netdevice level)
 */

int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
             struct packet_type *pt, struct net_device *orig_dev);

int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);

/*
 * upper-layer output functions
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             __u32 mark, struct ipv6_txoptions *opt, int tclass);

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);

int ip6_append_data(struct sock *sk,
                    int getfrag(void *from, char *to, int offset, int len,
                                int odd, struct sk_buff *skb),
                    void *from, int length, int transhdrlen,
                    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                    struct rt6_info *rt, unsigned int flags,
                    const struct sockcm_cookie *sockc);

int ip6_push_pending_frames(struct sock *sk);

void ip6_flush_pending_frames(struct sock *sk);

int ip6_send_skb(struct sk_buff *skb);

struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
                               struct inet_cork_full *cork,
                               struct inet6_cork *v6_cork);
struct sk_buff *ip6_make_skb(struct sock *sk,
                             int getfrag(void *from, char *to, int offset,
                                         int len, int odd, struct sk_buff *skb),
                             void *from, int length, int transhdrlen,
                             struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                             struct rt6_info *rt, unsigned int flags,
                             const struct sockcm_cookie *sockc);

static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
{
        return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork,
                              &inet6_sk(sk)->cork);
}

int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
                   struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
                                      const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                         const struct in6_addr *final_dst);
struct dst_entry *ip6_blackhole_route(struct net *net,
                                      struct dst_entry *orig_dst);

/*
 * skb processing functions
 */

int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_forward(struct sk_buff *skb);
int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);

int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

/*
 * Extension header (options) processing
 */

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                          u8 *proto, struct in6_addr **daddr_p,
                          struct in6_addr *saddr);
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                         u8 *proto);

int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
                     __be16 *frag_offp);

bool ipv6_ext_hdr(u8 nexthdr);

enum {
        IP6_FH_F_FRAG = (1 << 0),
        IP6_FH_F_AUTH = (1 << 1),
        IP6_FH_F_SKIP_RH = (1 << 2),
};

/* find specified header and get offset to it */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
                  unsigned short *fragoff, int *fragflg);

int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type);

struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
                                const struct ipv6_txoptions *opt,
                                struct in6_addr *orig);

/*
 * socket options (ipv6_sockglue.c)
 */

int ipv6_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, unsigned int optlen);
int ipv6_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen);
int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, unsigned int optlen);
int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen);

int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
                           int addr_len);
int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
                                 int addr_len);
int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
void ip6_datagram_release_cb(struct sock *sk);

int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                    int *addr_len);
int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
                     int *addr_len);
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                     u32 info, u8 *payload);
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);

int inet6_release(struct socket *sock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
                  int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);

int inet6_hash_connect(struct inet_timewait_death_row *death_row,
                       struct sock *sk);

/*
 * reassembly.c
 */

extern const struct proto_ops inet6_stream_ops;
extern const struct proto_ops inet6_dgram_ops;
extern const struct proto_ops inet6_sockraw_ops;

struct group_source_req;
struct group_filter;

int ip6_mc_source(int add, int omode, struct sock *sk,
                  struct group_source_req *pgsr);
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
                  struct group_filter __user *optval, int __user *optlen);

#ifdef CONFIG_PROC_FS
int ac6_proc_init(struct net *net);
void ac6_proc_exit(struct net *net);
int raw6_proc_init(void);
void raw6_proc_exit(void);
int tcp6_proc_init(struct net *net);
void tcp6_proc_exit(struct net *net);
int udp6_proc_init(struct net *net);
void udp6_proc_exit(struct net *net);
int udplite6_proc_init(void);
void udplite6_proc_exit(void);
int ipv6_misc_proc_init(void);
void ipv6_misc_proc_exit(void);
int snmp6_register_dev(struct inet6_dev *idev);
int snmp6_unregister_dev(struct inet6_dev *idev);
#else
static inline int ac6_proc_init(struct net *net) { return 0; }
static inline void ac6_proc_exit(struct net *net) { }
static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; }
static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#endif

#ifdef CONFIG_SYSCTL
extern struct ctl_table ipv6_route_table_template[];

struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);
#endif

int ipv6_sock_mc_join(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
#endif /* _NET_IPV6_H */