/*
 * Linux INET6 implementation
 *
 * Authors:
 * Pedro Roque <roque@di.fc.ul.pt>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _NET_IPV6_H
#define _NET_IPV6_H

#include <linux/ipv6.h>
#include <linux/hardirq.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/snmp.h>
#include <net/netns/hash.h>

#define SIN6_LEN_RFC2133        24

#define IPV6_MAXPLEN            65535

/*
 * NextHeader field of IPv6 header
 */

#define NEXTHDR_HOP             0       /* Hop-by-hop option header. */
#define NEXTHDR_TCP             6       /* TCP segment. */
#define NEXTHDR_UDP             17      /* UDP message. */
#define NEXTHDR_IPV6            41      /* IPv6 in IPv6 */
#define NEXTHDR_ROUTING         43      /* Routing header. */
#define NEXTHDR_FRAGMENT        44      /* Fragmentation/reassembly header. */
#define NEXTHDR_GRE             47      /* GRE header. */
#define NEXTHDR_ESP             50      /* Encapsulating security payload. */
#define NEXTHDR_AUTH            51      /* Authentication header. */
#define NEXTHDR_ICMP            58      /* ICMP for IPv6. */
#define NEXTHDR_NONE            59      /* No next header */
#define NEXTHDR_DEST            60      /* Destination options header. */
#define NEXTHDR_SCTP            132     /* SCTP message. */
#define NEXTHDR_MOBILITY        135     /* Mobility header. */

#define NEXTHDR_MAX             255

#define IPV6_DEFAULT_HOPLIMIT   64
#define IPV6_DEFAULT_MCASTHOPS  1
/* Limits on Hop-by-Hop and Destination options.
 *
 * Per RFC8200 there is no limit on the maximum number or lengths of options in
 * Hop-by-Hop or Destination options other than that the packet must fit in an
 * MTU. We allow configurable limits in order to mitigate potential denial of
 * service attacks.
 *
 * There are three limits that may be set:
 *   - Limit the number of options in a Hop-by-Hop or Destination options
 *     extension header
 *   - Limit the byte length of a Hop-by-Hop or Destination options extension
 *     header
 *   - Disallow unknown options
 *
 * The limits are expressed in corresponding sysctls:
 *
 * ipv6.sysctl.max_dst_opts_cnt
 * ipv6.sysctl.max_hbh_opts_cnt
 * ipv6.sysctl.max_dst_opts_len
 * ipv6.sysctl.max_hbh_opts_len
 *
 * max_*_opts_cnt is the number of TLVs that are allowed for Destination
 * options or Hop-by-Hop options. If the number is less than zero then unknown
 * TLVs are disallowed and the number of known options that are allowed is the
 * absolute value. Setting the value to INT_MAX indicates no limit.
 *
 * max_*_opts_len is the length limit in bytes of a Destination or
 * Hop-by-Hop options extension header. Setting the value to INT_MAX
 * indicates no length limit.
 *
 * If a limit is exceeded when processing an extension header the packet is
 * silently discarded.
 */

/* Default limits for Hop-by-Hop and Destination options */
#define IP6_DEFAULT_MAX_DST_OPTS_CNT    8
#define IP6_DEFAULT_MAX_HBH_OPTS_CNT    8
#define IP6_DEFAULT_MAX_DST_OPTS_LEN    INT_MAX /* No limit */
#define IP6_DEFAULT_MAX_HBH_OPTS_LEN    INT_MAX /* No limit */
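
/*
 * Illustrative sketch (not part of the original header): one way a
 * Destination/Hop-by-Hop options parser could enforce the max_*_opts_cnt and
 * max_*_opts_len limits described above. The helper name and the
 * max_cnt/max_len parameters are hypothetical; the real enforcement lives in
 * the TLV parsing code in net/ipv6/exthdrs.c, which also implements the
 * "negative count disallows unknown TLVs" rule omitted here.
 */
static inline bool ip6_opts_within_limits(const struct ipv6_opt_hdr *hdr,
                                          int max_cnt, int max_len)
{
        const u8 *tlv = (const u8 *)(hdr + 1);
        int len = ipv6_optlen(hdr);     /* total length, including the 2-byte header */
        int limit = max_cnt < 0 ? -max_cnt : max_cnt;
        int off = sizeof(*hdr);
        int cnt = 0;

        if (len > max_len)
                return false;           /* over the byte-length limit */

        while (off < len) {
                if (tlv[0] == IPV6_TLV_PAD1) {
                        off++;          /* Pad1 is a lone byte, not counted */
                        tlv++;
                        continue;
                }
                if (off + 2 > len || off + 2 + tlv[1] > len)
                        return false;   /* malformed TLV */
                if (++cnt > limit)
                        return false;   /* over the option-count limit */
                off += 2 + tlv[1];
                tlv += 2 + tlv[1];
        }
        return true;
}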
/*
 * Addr type
 *
 * type - unicast | multicast
 * scope - local | site | global
 * v4 - compat
 * v4mapped
 * any
 * loopback
 */

#define IPV6_ADDR_ANY           0x0000U
#define IPV6_ADDR_UNICAST       0x0001U
#define IPV6_ADDR_MULTICAST     0x0002U
#define IPV6_ADDR_LOOPBACK      0x0010U
#define IPV6_ADDR_LINKLOCAL     0x0020U
#define IPV6_ADDR_SITELOCAL     0x0040U
#define IPV6_ADDR_COMPATv4      0x0080U
#define IPV6_ADDR_SCOPE_MASK    0x00f0U
#define IPV6_ADDR_MAPPED        0x1000U

/*
 * Addr scopes
 */
#define IPV6_ADDR_MC_SCOPE(a)   \
        ((a)->s6_addr[1] & 0x0f)        /* nonstandard */
#define __IPV6_ADDR_SCOPE_INVALID       -1
#define IPV6_ADDR_SCOPE_NODELOCAL       0x01
#define IPV6_ADDR_SCOPE_LINKLOCAL       0x02
#define IPV6_ADDR_SCOPE_SITELOCAL       0x05
#define IPV6_ADDR_SCOPE_ORGLOCAL        0x08
#define IPV6_ADDR_SCOPE_GLOBAL          0x0e
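
/*
 * Illustrative sketch (not part of the original header): the value returned by
 * ipv6_addr_type() (declared further down) is a bitmask built from the
 * IPV6_ADDR_* flags above, so callers classify addresses with bit tests rather
 * than equality checks. The helper name below is hypothetical.
 */
static inline bool ipv6_addr_type_is_global_unicast(int addr_type)
{
        return (addr_type & IPV6_ADDR_UNICAST) &&
               !(addr_type & (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_SITELOCAL |
                              IPV6_ADDR_LOOPBACK));
}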
/*
 * Addr flags
 */
#define IPV6_ADDR_MC_FLAG_TRANSIENT(a)  \
        ((a)->s6_addr[1] & 0x10)
#define IPV6_ADDR_MC_FLAG_PREFIX(a)     \
        ((a)->s6_addr[1] & 0x20)
#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \
        ((a)->s6_addr[1] & 0x40)

/*
 * fragmentation header
 */
struct frag_hdr {
        __u8    nexthdr;
        __u8    reserved;
        __be16  frag_off;
        __be32  identification;
};

#define IP6_MF          0x0001
#define IP6_OFFSET      0xFFF8

#define IP6_REPLY_MARK(net, mark) \
        ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
#include <net/sock.h>

/* sysctls */
extern int sysctl_mld_max_msf;
extern int sysctl_mld_qrv;

#define _DEVINC(net, statname, mod, idev, field)                        \
({                                                                      \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
        mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
})

/* per device counters are atomic_long_t */
#define _DEVINCATOMIC(net, statname, mod, idev, field)                  \
({                                                                      \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
        mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
})

/* per device and per net counters are atomic_long_t */
#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field)               \
({                                                                      \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
        SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
})

#define _DEVADD(net, statname, mod, idev, field, val)                   \
({                                                                      \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
        mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
})

#define _DEVUPD(net, statname, mod, idev, field, val)                   \
({                                                                      \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
        mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
})

/* MIBs */

#define IP6_INC_STATS(net, idev, field)         \
                _DEVINC(net, ipv6, , idev, field)
#define __IP6_INC_STATS(net, idev, field)       \
                _DEVINC(net, ipv6, __, idev, field)
#define IP6_ADD_STATS(net, idev, field, val)    \
                _DEVADD(net, ipv6, , idev, field, val)
#define __IP6_ADD_STATS(net, idev, field, val)  \
                _DEVADD(net, ipv6, __, idev, field, val)
#define IP6_UPD_PO_STATS(net, idev, field, val)         \
                _DEVUPD(net, ipv6, , idev, field, val)
#define __IP6_UPD_PO_STATS(net, idev, field, val)       \
                _DEVUPD(net, ipv6, __, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field)       \
                _DEVINCATOMIC(net, icmpv6, , idev, field)
#define __ICMP6_INC_STATS(net, idev, field)     \
                _DEVINCATOMIC(net, icmpv6, __, idev, field)

#define ICMP6MSGOUT_INC_STATS(net, idev, field)         \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field + 256)
#define ICMP6MSGIN_INC_STATS(net, idev, field)          \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
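
/*
 * Illustrative sketch (not part of the original header): typical use of the
 * MIB helpers above in a receive path. "idev" is the inet6_dev of the ingress
 * interface and may be NULL (the macros tolerate that); the __-prefixed
 * variants are intended for contexts where the per-CPU counters can be
 * updated without extra protection, e.g. softirq. The function name is
 * hypothetical.
 */
static inline void ip6_count_in_discard(struct net *net, struct inet6_dev *idev)
{
        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
}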
struct ip6_ra_chain {
        struct ip6_ra_chain     *next;
        struct sock             *sk;
        int                     sel;
        void                    (*destructor)(struct sock *);
};

extern struct ip6_ra_chain      *ip6_ra_chain;
extern rwlock_t ip6_ra_lock;

/*
 * This structure is prepared by the protocol when parsing ancillary data
 * and is then passed to IPv6.
 */
struct ipv6_txoptions {
        refcount_t              refcnt;
        /* Length of this structure */
        int                     tot_len;

        /* length of extension headers */
        __u16                   opt_flen;       /* after fragment hdr */
        __u16                   opt_nflen;      /* before fragment hdr */

        struct ipv6_opt_hdr     *hopopt;
        struct ipv6_opt_hdr     *dst0opt;
        struct ipv6_rt_hdr      *srcrt;         /* Routing Header */
        struct ipv6_opt_hdr     *dst1opt;
        struct rcu_head         rcu;
        /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
};

struct ip6_flowlabel {
        struct ip6_flowlabel __rcu *next;
        __be32                  label;
        atomic_t                users;
        struct in6_addr         dst;
        struct ipv6_txoptions   *opt;
        unsigned long           linger;
        struct rcu_head         rcu;
        u8                      share;
        union {
                struct pid      *pid;
                kuid_t          uid;
        } owner;
        unsigned long           lastuse;
        unsigned long           expires;
        struct net              *fl_net;
};

#define IPV6_FLOWINFO_MASK              cpu_to_be32(0x0FFFFFFF)
#define IPV6_FLOWLABEL_MASK             cpu_to_be32(0x000FFFFF)
#define IPV6_FLOWLABEL_STATELESS_FLAG   cpu_to_be32(0x00080000)
#define IPV6_TCLASS_MASK                (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT               20

struct ipv6_fl_socklist {
        struct ipv6_fl_socklist __rcu   *next;
        struct ip6_flowlabel            *fl;
        struct rcu_head                 rcu;
};

struct ipcm6_cookie {
        __s16 hlimit;
        __s16 tclass;
        __s8  dontfrag;
        struct ipv6_txoptions *opt;
};
static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
        struct ipv6_txoptions *opt;

        rcu_read_lock();
        opt = rcu_dereference(np->opt);
        if (opt) {
                if (!refcount_inc_not_zero(&opt->refcnt))
                        opt = NULL;
                else
                        opt = rcu_pointer_handoff(opt);
        }
        rcu_read_unlock();
        return opt;
}

static inline void txopt_put(struct ipv6_txoptions *opt)
{
        if (opt && refcount_dec_and_test(&opt->refcnt))
                kfree_rcu(opt, rcu);
}
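
/*
 * Illustrative sketch (not part of the original header): txopt_get() takes a
 * reference under RCU and txopt_put() drops it, so a caller that needs the
 * socket's tx options outside the RCU read side brackets the use like this.
 * The function name is hypothetical.
 */
static inline int ip6_txopts_nflen(struct ipv6_pinfo *np)
{
        struct ipv6_txoptions *opt = txopt_get(np);     /* may return NULL */
        int nflen = opt ? opt->opt_nflen : 0;           /* ... use the options ... */

        txopt_put(opt);                                 /* NULL-safe */
        return nflen;
}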
struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
                                         struct ip6_flowlabel *fl,
                                         struct ipv6_txoptions *fopt);
void fl6_free_socklist(struct sock *sk);
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
                           int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);

static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
        if (fl)
                atomic_dec(&fl->users);
}

void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);

void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                                struct icmp6hdr *thdr, int len);

int ip6_ra_control(struct sock *sk, int sel);

int ipv6_parse_hopopts(struct sk_buff *skb);

struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
                                        struct ipv6_txoptions *opt);
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
                                          struct ipv6_txoptions *opt,
                                          int newtype,
                                          struct ipv6_opt_hdr __user *newopt,
                                          int newoptlen);
struct ipv6_txoptions *
ipv6_renew_options_kern(struct sock *sk,
                        struct ipv6_txoptions *opt,
                        int newtype,
                        struct ipv6_opt_hdr *newopt,
                        int newoptlen);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt);

bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
                       const struct inet6_skb_parm *opt);
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
                                           struct ipv6_txoptions *opt);

static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
        /* If forwarding is enabled, RAs are not accepted unless the special
         * hybrid mode (accept_ra=2) is enabled.
         */
        return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
            idev->cnf.accept_ra;
}
#define IPV6_FRAG_HIGH_THRESH   (4 * 1024*1024) /* 4194304 */
#define IPV6_FRAG_LOW_THRESH    (3 * 1024*1024) /* 3145728 */
#define IPV6_FRAG_TIMEOUT       (60 * HZ)       /* 60 seconds */

int __ipv6_addr_type(const struct in6_addr *addr);
static inline int ipv6_addr_type(const struct in6_addr *addr)
{
        return __ipv6_addr_type(addr) & 0xffff;
}

static inline int ipv6_addr_scope(const struct in6_addr *addr)
{
        return __ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK;
}

static inline int __ipv6_addr_src_scope(int type)
{
        return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16);
}

static inline int ipv6_addr_src_scope(const struct in6_addr *addr)
{
        return __ipv6_addr_src_scope(__ipv6_addr_type(addr));
}

static inline bool __ipv6_addr_needs_scope_id(int type)
{
        return type & IPV6_ADDR_LINKLOCAL ||
               (type & IPV6_ADDR_MULTICAST &&
                (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)));
}

static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface)
{
        return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0;
}

static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
{
        return memcmp(a1, a2, sizeof(struct in6_addr));
}

static inline bool
ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
                     const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ulm = (const unsigned long *)m;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
                  ((ul1[1] ^ ul2[1]) & ulm[1]));
#else
        return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
                  ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
                  ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
                  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
#endif
}

static inline void ipv6_addr_prefix(struct in6_addr *pfx,
                                    const struct in6_addr *addr,
                                    int plen)
{
        /* caller must guarantee 0 <= plen <= 128 */
        int o = plen >> 3,
            b = plen & 0x7;

        memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr));
        memcpy(pfx->s6_addr, addr, o);
        if (b != 0)
                pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
}

static inline void ipv6_addr_prefix_copy(struct in6_addr *addr,
                                         const struct in6_addr *pfx,
                                         int plen)
{
        /* caller must guarantee 0 <= plen <= 128 */
        int o = plen >> 3,
            b = plen & 0x7;

        memcpy(addr->s6_addr, pfx, o);
        if (b != 0) {
                addr->s6_addr[o] &= ~(0xff00 >> b);
                addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b));
        }
}
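
/*
 * Illustrative sketch (not part of the original header): extracting the /48
 * routing prefix of an address with ipv6_addr_prefix(). For plen = 48, o = 6
 * whole bytes are copied and b = 0, so no partial-byte masking is needed; for
 * plen = 52 the seventh byte would additionally be masked with 0xf0. The
 * wrapper name is hypothetical.
 */
static inline void ipv6_addr_prefix48(struct in6_addr *pfx,
                                      const struct in6_addr *addr)
{
        ipv6_addr_prefix(pfx, addr, 48);        /* pfx = addr/48, host bits zeroed */
}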
static inline void __ipv6_addr_set_half(__be32 *addr,
                                        __be32 wh, __be32 wl)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#if defined(__BIG_ENDIAN)
        if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) {
                *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl));
                return;
        }
#elif defined(__LITTLE_ENDIAN)
        if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) {
                *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh));
                return;
        }
#endif
#endif
        addr[0] = wh;
        addr[1] = wl;
}

static inline void ipv6_addr_set(struct in6_addr *addr,
                                 __be32 w1, __be32 w2,
                                 __be32 w3, __be32 w4)
{
        __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2);
        __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4);
}

static inline bool ipv6_addr_equal(const struct in6_addr *a1,
                                   const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
#else
        return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
                (a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
                (a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
                (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
#endif
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline bool __ipv6_prefix_equal64_half(const __be64 *a1,
                                              const __be64 *a2,
                                              unsigned int len)
{
        if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len))))
                return false;
        return true;
}

static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
                                     const struct in6_addr *addr2,
                                     unsigned int prefixlen)
{
        const __be64 *a1 = (const __be64 *)addr1;
        const __be64 *a2 = (const __be64 *)addr2;

        if (prefixlen >= 64) {
                if (a1[0] ^ a2[0])
                        return false;
                return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64);
        }
        return __ipv6_prefix_equal64_half(a1, a2, prefixlen);
}
#else
static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
                                     const struct in6_addr *addr2,
                                     unsigned int prefixlen)
{
        const __be32 *a1 = addr1->s6_addr32;
        const __be32 *a2 = addr2->s6_addr32;
        unsigned int pdw, pbi;

        /* check complete u32 in prefix */
        pdw = prefixlen >> 5;
        if (pdw && memcmp(a1, a2, pdw << 2))
                return false;

        /* check incomplete u32 in prefix */
        pbi = prefixlen & 0x1f;
        if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi))))
                return false;

        return true;
}
#endif
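
/*
 * Illustrative sketch (not part of the original header): ipv6_prefix_equal()
 * compares only the first prefixlen bits, so it can answer questions such as
 * "are these two addresses on the same /64 subnet?" regardless of the host
 * bits. On 64-bit arches this takes the __be64 fast path above. The wrapper
 * name is hypothetical.
 */
static inline bool ipv6_same_subnet64(const struct in6_addr *a1,
                                      const struct in6_addr *a2)
{
        return ipv6_prefix_equal(a1, a2, 64);   /* compare only the top 64 bits */
}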
struct inet_frag_queue;

enum ip6_defrag_users {
        IP6_DEFRAG_LOCAL_DELIVER,
        IP6_DEFRAG_CONNTRACK_IN,
        __IP6_DEFRAG_CONNTRACK_IN       = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
        IP6_DEFRAG_CONNTRACK_OUT,
        __IP6_DEFRAG_CONNTRACK_OUT      = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
        IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
        __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

void ip6_frag_init(struct inet_frag_queue *q, const void *a);
extern const struct rhashtable_params ip6_rhash_params;

/*
 * Equivalent of ipv4 struct ip
 */
struct frag_queue {
        struct inet_frag_queue  q;

        int                     iif;
        __u16                   nhoffset;
        u8                      ecn;
};

void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);

static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul = (const unsigned long *)a;

        return (ul[0] | ul[1]) == 0UL;
#else
        return (a->s6_addr32[0] | a->s6_addr32[1] |
                a->s6_addr32[2] | a->s6_addr32[3]) == 0;
#endif
}

static inline u32 ipv6_addr_hash(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul = (const unsigned long *)a;
        unsigned long x = ul[0] ^ ul[1];

        return (u32)(x ^ (x >> 32));
#else
        return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
                             a->s6_addr32[2] ^ a->s6_addr32[3]);
#endif
}

/* more secure version of ipv6_addr_hash() */
static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
        u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];

        return jhash_3words(v,
                            (__force u32)a->s6_addr32[2],
                            (__force u32)a->s6_addr32[3],
                            initval);
}
static inline bool ipv6_addr_loopback(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const __be64 *be = (const __be64 *)a;

        return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL;
#else
        return (a->s6_addr32[0] | a->s6_addr32[1] |
                a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0;
#endif
}

/*
 * Note that we must __force cast these to unsigned long to make sparse happy,
 * since all of the endian-annotated types are fixed size regardless of arch.
 */
static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
{
        return (
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
                *(unsigned long *)a |
#else
                (__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
#endif
                (__force unsigned long)(a->s6_addr32[2] ^
                                        cpu_to_be32(0x0000ffff))) == 0UL;
}

static inline u32 ipv6_portaddr_hash(const struct net *net,
                                     const struct in6_addr *addr6,
                                     unsigned int port)
{
        unsigned int hash, mix = net_hash_mix(net);

        if (ipv6_addr_any(addr6))
                hash = jhash_1word(0, mix);
        else if (ipv6_addr_v4mapped(addr6))
                hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
        else
                hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);

        return hash ^ port;
}

/*
 * Check for an RFC 4843 ORCHID address
 * (Overlay Routable Cryptographic Hash Identifiers)
 */
static inline bool ipv6_addr_orchid(const struct in6_addr *a)
{
        return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
}

static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
{
        return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
}

static inline void ipv6_addr_set_v4mapped(const __be32 addr,
                                          struct in6_addr *v4mapped)
{
        ipv6_addr_set(v4mapped,
                      0, 0,
                      htonl(0x0000FFFF),
                      addr);
}
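
/*
 * Illustrative sketch (not part of the original header): mapping an IPv4
 * address into the ::ffff:0:0/96 space and recovering it again. Dual-stack
 * sockets store IPv4 peers this way; the low 32 bits of a v4-mapped address
 * are the original IPv4 address in network byte order. The function name is
 * hypothetical.
 */
static inline __be32 ipv6_v4mapped_roundtrip(__be32 v4addr)
{
        struct in6_addr tmp;

        ipv6_addr_set_v4mapped(v4addr, &tmp);   /* tmp = ::ffff:a.b.c.d */
        if (!ipv6_addr_v4mapped(&tmp))
                return 0;                       /* cannot happen for this input */
        return tmp.s6_addr32[3];                /* back to the IPv4 address */
}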
/*
 * find the first different bit between two addresses
 * length of address must be a multiple of 32bits
 */
static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen)
{
        const __be32 *a1 = token1, *a2 = token2;
        int i;

        addrlen >>= 2;

        for (i = 0; i < addrlen; i++) {
                __be32 xb = a1[i] ^ a2[i];
                if (xb)
                        return i * 32 + 31 - __fls(ntohl(xb));
        }

        /*
         * we should *never* get to this point since that
         * would mean the addrs are equal
         *
         * However, we do get to it 8) And exactly, when
         * addresses are equal 8)
         *
         * ip route add 1111::/128 via ...
         * ip route add 1111::/64 via ...
         * and we are here.
         *
         * Ideally, this function should stop comparison
         * at prefix length. It does not, but it is still OK,
         * if returned value is greater than prefix length.
         * --ANK (980803)
         */
        return addrlen << 5;
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen)
{
        const __be64 *a1 = token1, *a2 = token2;
        int i;

        addrlen >>= 3;

        for (i = 0; i < addrlen; i++) {
                __be64 xb = a1[i] ^ a2[i];
                if (xb)
                        return i * 64 + 63 - __fls(be64_to_cpu(xb));
        }

        return addrlen << 6;
}
#endif

static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        if (__builtin_constant_p(addrlen) && !(addrlen & 7))
                return __ipv6_addr_diff64(token1, token2, addrlen);
#endif
        return __ipv6_addr_diff32(token1, token2, addrlen);
}

static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2)
{
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
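
/*
 * Illustrative sketch (not part of the original header): ipv6_addr_diff()
 * returns the index of the first differing bit, counting from the most
 * significant bit, and 128 when the addresses are equal. For 2001:db8::1 vs
 * 2001:db8::2 the last 32-bit word XOR is 0x3, so the result is
 * 3*32 + 31 - __fls(3) = 126; the FIB uses this value to pick the branching
 * bit when inserting routes. The function name is hypothetical.
 */
static inline int ipv6_addr_diff_example(void)
{
        struct in6_addr a, b;

        ipv6_addr_set(&a, htonl(0x20010db8), 0, 0, htonl(1));
        ipv6_addr_set(&b, htonl(0x20010db8), 0, 0, htonl(2));
        return ipv6_addr_diff(&a, &b);  /* 126: ...01 and ...10 first differ at bit 126 */
}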
__be32 ipv6_select_ident(struct net *net,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr);
__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);

int ip6_dst_hoplimit(struct dst_entry *dst);

static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
                                      struct dst_entry *dst)
{
        int hlimit;

        if (ipv6_addr_is_multicast(&fl6->daddr))
                hlimit = np->mcast_hops;
        else
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);
        return hlimit;
}

/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :      flow->v6addrs.src = iph->saddr;
 *                      flow->v6addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
                                            const struct ipv6hdr *iph)
{
        BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
                     offsetof(typeof(flow->addrs), v6addrs.src) +
                     sizeof(flow->addrs.v6addrs.src));
        memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
        flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
#if IS_ENABLED(CONFIG_IPV6)

/* Sysctl settings for net ipv6.auto_flowlabels */
#define IP6_AUTO_FLOW_LABEL_OFF         0
#define IP6_AUTO_FLOW_LABEL_OPTOUT      1
#define IP6_AUTO_FLOW_LABEL_OPTIN       2
#define IP6_AUTO_FLOW_LABEL_FORCED      3

#define IP6_AUTO_FLOW_LABEL_MAX         IP6_AUTO_FLOW_LABEL_FORCED

#define IP6_DEFAULT_AUTO_FLOW_LABELS    IP6_AUTO_FLOW_LABEL_OPTOUT

static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                                        __be32 flowlabel, bool autolabel,
                                        struct flowi6 *fl6)
{
        u32 hash;

        /* @flowlabel may include more than a flow label, eg, the traffic class.
         * Here we want only the flow label value.
         */
        flowlabel &= IPV6_FLOWLABEL_MASK;

        if (flowlabel ||
            net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
            (!autolabel &&
             net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
                return flowlabel;

        hash = skb_get_hash_flowi6(skb, fl6);

        /* Since this is being sent on the wire, obfuscate the hash a bit to
         * minimize the possibility that any useful information is leaked to
         * an attacker. Only the lower 20 bits are relevant.
         */
        hash = rol32(hash, 16);

        flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;

        if (net->ipv6.sysctl.flowlabel_state_ranges)
                flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;

        return flowlabel;
}

static inline int ip6_default_np_autolabel(struct net *net)
{
        switch (net->ipv6.sysctl.auto_flowlabels) {
        case IP6_AUTO_FLOW_LABEL_OFF:
        case IP6_AUTO_FLOW_LABEL_OPTIN:
        default:
                return 0;
        case IP6_AUTO_FLOW_LABEL_OPTOUT:
        case IP6_AUTO_FLOW_LABEL_FORCED:
                return 1;
        }
}
#else
static inline void ip6_set_txhash(struct sock *sk) { }
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                                        __be32 flowlabel, bool autolabel,
                                        struct flowi6 *fl6)
{
        return flowlabel;
}
static inline int ip6_default_np_autolabel(struct net *net)
{
        return 0;
}
#endif
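
/*
 * Illustrative sketch (not part of the original header): how a transmit path
 * might combine a socket-supplied flow label with the auto_flowlabels policy.
 * With the default OPTOUT policy a label derived from the skb flow hash is
 * generated unless the socket already supplied one or opted out. The function
 * name is hypothetical; real callers pass the per-socket autolabel flag
 * instead of the namespace default used here.
 */
static inline __be32 ip6_choose_flowlabel(struct net *net, struct sk_buff *skb,
                                          __be32 sk_flowlabel,
                                          struct flowi6 *fl6)
{
        bool autolabel = ip6_default_np_autolabel(net);

        return ip6_make_flowlabel(net, skb, sk_flowlabel, autolabel, fl6);
}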
#if IS_ENABLED(CONFIG_IPV6)
static inline int ip6_multipath_hash_policy(const struct net *net)
{
        return net->ipv6.sysctl.multipath_hash_policy;
}
#else
static inline int ip6_multipath_hash_policy(const struct net *net)
{
        return 0;
}
#endif

/*
 * Header manipulation
 */
static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
                                __be32 flowlabel)
{
        *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel;
}

static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
{
        return *(__be32 *)hdr & IPV6_FLOWINFO_MASK;
}

static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr)
{
        return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK;
}

static inline u8 ip6_tclass(__be32 flowinfo)
{
        return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
}

static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
{
        return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
}
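
/*
 * Illustrative sketch (not part of the original header): building the first
 * 32 bits of an IPv6 header and reading the fields back. For a traffic class
 * of 0x2e and a zero flow label, ip6_make_flowinfo() yields htonl(0x02e00000),
 * ip6_flow_hdr() stores htonl(0x62e00000) (version 6 in the top nibble), and
 * ip6_tclass() recovers 0x2e. The function name is hypothetical.
 */
static inline bool ip6_flowinfo_roundtrip_ok(void)
{
        struct ipv6hdr hdr;
        __be32 flowinfo = ip6_make_flowinfo(0x2e, 0);

        ip6_flow_hdr(&hdr, 0x2e, 0);    /* version 6, tclass 0x2e, label 0 */
        return ip6_tclass(flowinfo) == 0x2e &&
               ip6_flowinfo(&hdr) == flowinfo;
}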
/*
 * Prototypes exported by ipv6
 */

/*
 * rcv function (called from netdevice level)
 */

int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
             struct packet_type *pt, struct net_device *orig_dev);

int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);

/*
 * upper-layer output functions
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             __u32 mark, struct ipv6_txoptions *opt, int tclass);

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);

int ip6_append_data(struct sock *sk,
                    int getfrag(void *from, char *to, int offset, int len,
                                int odd, struct sk_buff *skb),
                    void *from, int length, int transhdrlen,
                    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                    struct rt6_info *rt, unsigned int flags,
                    const struct sockcm_cookie *sockc);

int ip6_push_pending_frames(struct sock *sk);

void ip6_flush_pending_frames(struct sock *sk);

int ip6_send_skb(struct sk_buff *skb);

struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
                               struct inet_cork_full *cork,
                               struct inet6_cork *v6_cork);
struct sk_buff *ip6_make_skb(struct sock *sk,
                             int getfrag(void *from, char *to, int offset,
                                         int len, int odd, struct sk_buff *skb),
                             void *from, int length, int transhdrlen,
                             struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                             struct rt6_info *rt, unsigned int flags,
                             const struct sockcm_cookie *sockc);

static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
{
        return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork,
                              &inet6_sk(sk)->cork);
}

unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst);

int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
                   struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
                                      const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                         const struct in6_addr *final_dst,
                                         bool connected);
struct dst_entry *ip6_blackhole_route(struct net *net,
                                      struct dst_entry *orig_dst);

/*
 * skb processing functions
 */

int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_forward(struct sk_buff *skb);
int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);

int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

/*
 * Extension header (options) processing
 */

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                          u8 *proto, struct in6_addr **daddr_p,
                          struct in6_addr *saddr);
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                         u8 *proto);

int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
                     __be16 *frag_offp);

bool ipv6_ext_hdr(u8 nexthdr);

enum {
        IP6_FH_F_FRAG           = (1 << 0),
        IP6_FH_F_AUTH           = (1 << 1),
        IP6_FH_F_SKIP_RH        = (1 << 2),
};

/* find specified header and get offset to it */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
                  unsigned short *fragoff, int *fragflg);

int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type);

struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
                                const struct ipv6_txoptions *opt,
                                struct in6_addr *orig);

/*
 * socket options (ipv6_sockglue.c)
 */

int ipv6_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, unsigned int optlen);
int ipv6_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen);
int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, unsigned int optlen);
int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen);

int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
                           int addr_len);
int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
                                 int addr_len);
int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
void ip6_datagram_release_cb(struct sock *sk);

int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                    int *addr_len);
int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
                     int *addr_len);
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                     u32 info, u8 *payload);
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);

int inet6_release(struct socket *sock);
int __inet6_bind(struct sock *sock, struct sockaddr *uaddr, int addr_len,
                 bool force_bind_address_no_port, bool with_lock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
                  int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);

int inet6_hash_connect(struct inet_timewait_death_row *death_row,
                       struct sock *sk);

/*
 * reassembly.c
 */

extern const struct proto_ops inet6_stream_ops;
extern const struct proto_ops inet6_dgram_ops;
extern const struct proto_ops inet6_sockraw_ops;

struct group_source_req;
struct group_filter;

int ip6_mc_source(int add, int omode, struct sock *sk,
                  struct group_source_req *pgsr);
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
                  struct group_filter __user *optval, int __user *optlen);

#ifdef CONFIG_PROC_FS
int ac6_proc_init(struct net *net);
void ac6_proc_exit(struct net *net);
int raw6_proc_init(void);
void raw6_proc_exit(void);
int tcp6_proc_init(struct net *net);
void tcp6_proc_exit(struct net *net);
int udp6_proc_init(struct net *net);
void udp6_proc_exit(struct net *net);
int udplite6_proc_init(void);
void udplite6_proc_exit(void);
int ipv6_misc_proc_init(void);
void ipv6_misc_proc_exit(void);
int snmp6_register_dev(struct inet6_dev *idev);
int snmp6_unregister_dev(struct inet6_dev *idev);
#else
static inline int ac6_proc_init(struct net *net) { return 0; }
static inline void ac6_proc_exit(struct net *net) { }
static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; }
static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#endif

#ifdef CONFIG_SYSCTL
extern struct ctl_table ipv6_route_table_template[];

struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);
#endif

int ipv6_sock_mc_join(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);

#endif /* _NET_IPV6_H */