gtp.c

/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
 *
 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Author: Harald Welte <hwelte@sysmocom.de>
 *         Pablo Neira Ayuso <pablo@netfilter.org>
 *         Andreas Schultz <aschultz@travelping.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>

/* An active session for the subscriber. */
struct pdp_ctx {
        struct hlist_node       hlist_tid;
        struct hlist_node       hlist_addr;

        union {
                u64             tid;
                struct {
                        u64     tid;
                        u16     flow;
                } v0;
                struct {
                        u32     i_tei;
                        u32     o_tei;
                } v1;
        } u;
        u8                      gtp_version;
        u16                     af;

        struct in_addr          ms_addr_ip4;
        struct in_addr          sgsn_addr_ip4;

        atomic_t                tx_seq;
        struct rcu_head         rcu_head;
};

/* One instance of the GTP device. */
struct gtp_dev {
        struct list_head        list;

        struct socket           *sock0;
        struct socket           *sock1u;

        struct net_device       *dev;

        unsigned int            hash_size;
        struct hlist_head       *tid_hash;
        struct hlist_head       *addr_hash;
};

static unsigned int gtp_net_id __read_mostly;

struct gtp_net {
        struct list_head gtp_dev_list;
};

static u32 gtp_h_initval;

static inline u32 gtp0_hashfn(u64 tid)
{
        u32 *tid32 = (u32 *) &tid;
        return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}

static inline u32 gtp1u_hashfn(u32 tid)
{
        return jhash_1word(tid, gtp_h_initval);
}

static inline u32 ipv4_hashfn(__be32 ip)
{
        return jhash_1word((__force u32)ip, gtp_h_initval);
}
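
/* Two per-device lookup tables hang off these hash functions, both seeded
 * with the same random jhash initval: tid_hash resolves the tunnel id of
 * incoming packets (64-bit TID for GTPv0, 32-bit TEI for GTPv1) on
 * decapsulation, while addr_hash resolves the mobile subscriber's IPv4
 * address on encapsulation.
 */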
/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
        struct hlist_head *head;
        struct pdp_ctx *pdp;

        head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

        hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
                if (pdp->gtp_version == GTP_V0 &&
                    pdp->u.v0.tid == tid)
                        return pdp;
        }
        return NULL;
}

/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
        struct hlist_head *head;
        struct pdp_ctx *pdp;

        head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

        hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
                if (pdp->gtp_version == GTP_V1 &&
                    pdp->u.v1.i_tei == tid)
                        return pdp;
        }
        return NULL;
}

/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
        struct hlist_head *head;
        struct pdp_ctx *pdp;

        head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

        hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
                if (pdp->af == AF_INET &&
                    pdp->ms_addr_ip4.s_addr == ms_addr)
                        return pdp;
        }
        return NULL;
}

static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
                                  unsigned int hdrlen)
{
        struct iphdr *iph;

        if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
                return false;

        iph = (struct iphdr *)(skb->data + hdrlen);

        return iph->saddr == pctx->ms_addr_ip4.s_addr;
}

/* Check if the inner IP source address in this packet is assigned to any
 * existing mobile subscriber.
 */
static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
                             unsigned int hdrlen)
{
        switch (ntohs(skb->protocol)) {
        case ETH_P_IP:
                return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
        }
        return false;
}
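
/* gtp_check_src_ms() is the anti-spoofing step of the receive path: a
 * tunnelled packet is only decapsulated if its inner IPv4 source address
 * matches the MS address recorded in the PDP context it arrived on.
 */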
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
                               bool xnet)
{
        unsigned int hdrlen = sizeof(struct udphdr) +
                              sizeof(struct gtp0_header);
        struct gtp0_header *gtp0;
        struct pdp_ctx *pctx;

        if (!pskb_may_pull(skb, hdrlen))
                return -1;

        gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

        if ((gtp0->flags >> 5) != GTP_V0)
                return 1;

        if (gtp0->type != GTP_TPDU)
                return 1;

        pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
        if (!pctx) {
                netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
                return 1;
        }

        if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
                netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
                return 1;
        }

        /* Get rid of the GTP + UDP headers. */
        return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
}
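
/* For GTPv0 the version lives in the top three bits of the flags octet and
 * the fixed header is 20 bytes. Anything on this socket that is not a
 * version 0 T-PDU is handed back to the UDP stack (return 1), so the
 * userspace daemon owning the socket still sees it; GTPv0 multiplexes
 * signalling and user plane on the same port, which makes this punt path
 * necessary.
 */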
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
                                bool xnet)
{
        unsigned int hdrlen = sizeof(struct udphdr) +
                              sizeof(struct gtp1_header);
        struct gtp1_header *gtp1;
        struct pdp_ctx *pctx;

        if (!pskb_may_pull(skb, hdrlen))
                return -1;

        gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

        if ((gtp1->flags >> 5) != GTP_V1)
                return 1;

        if (gtp1->type != GTP_TPDU)
                return 1;

        /* From 29.060: "This field shall be present if and only if any one or
         * more of the S, PN and E flags are set.".
         *
         * If any one of those bits is set, the whole 4-byte optional field
         * is present.
         */
        if (gtp1->flags & GTP1_F_MASK)
                hdrlen += 4;

        /* Make sure the header is large enough, including extensions. */
        if (!pskb_may_pull(skb, hdrlen))
                return -1;

        gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

        pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
        if (!pctx) {
                netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
                return 1;
        }

        if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
                netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
                return 1;
        }

        /* Get rid of the GTP + UDP headers. */
        return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
}
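
/* The 4 bytes conditionally added above carry the GTPv1 sequence number
 * (2 bytes), N-PDU number (1 byte) and next-extension-header type (1 byte);
 * per TS 29.060 the whole field is present whenever any of the E, S or PN
 * flags is set, which is why a single length adjustment suffices.
 */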
static void gtp_encap_disable(struct gtp_dev *gtp)
{
        if (gtp->sock0 && gtp->sock0->sk) {
                udp_sk(gtp->sock0->sk)->encap_type = 0;
                rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
        }
        if (gtp->sock1u && gtp->sock1u->sk) {
                udp_sk(gtp->sock1u->sk)->encap_type = 0;
                rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
        }

        gtp->sock0 = NULL;
        gtp->sock1u = NULL;
}

static void gtp_encap_destroy(struct sock *sk)
{
        struct gtp_dev *gtp;

        gtp = rcu_dereference_sk_user_data(sk);
        if (gtp)
                gtp_encap_disable(gtp);
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
        struct pcpu_sw_netstats *stats;
        struct gtp_dev *gtp;
        bool xnet;
        int ret;

        gtp = rcu_dereference_sk_user_data(sk);
        if (!gtp)
                return 1;

        netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

        xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));

        switch (udp_sk(sk)->encap_type) {
        case UDP_ENCAP_GTP0:
                netdev_dbg(gtp->dev, "received GTP0 packet\n");
                ret = gtp0_udp_encap_recv(gtp, skb, xnet);
                break;
        case UDP_ENCAP_GTP1U:
                netdev_dbg(gtp->dev, "received GTP1U packet\n");
                ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
                break;
        default:
                ret = -1; /* Shouldn't happen. */
        }

        switch (ret) {
        case 1:
                netdev_dbg(gtp->dev, "pass up to the process\n");
                return 1;
        case 0:
                netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
                break;
        case -1:
                netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
                kfree_skb(skb);
                return 0;
        }

        /* Now that the UDP and the GTP header have been removed, set up the
         * new network header. This is required by the upper layer to
         * calculate the transport header.
         */
        skb_reset_network_header(skb);

        skb->dev = gtp->dev;

        stats = this_cpu_ptr(gtp->dev->tstats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += skb->len;
        u64_stats_update_end(&stats->syncp);

        netif_rx(skb);

        return 0;
}
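
/* After a successful decap the inner IP packet re-enters the stack via
 * netif_rx() with skb->dev pointing at the gtp netdev, so it is accounted
 * against the tunnel device's per-cpu stats and then routed like any other
 * received packet.
 */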
static int gtp_dev_init(struct net_device *dev)
{
        struct gtp_dev *gtp = netdev_priv(dev);

        gtp->dev = dev;

        dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;

        return 0;
}

static void gtp_dev_uninit(struct net_device *dev)
{
        struct gtp_dev *gtp = netdev_priv(dev);

        gtp_encap_disable(gtp);
        free_percpu(dev->tstats);
}

static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
                                           const struct sock *sk, __be32 daddr)
{
        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_oif = sk->sk_bound_dev_if;
        fl4->daddr = daddr;
        fl4->saddr = inet_sk(sk)->inet_saddr;
        fl4->flowi4_tos = RT_CONN_FLAGS(sk);
        fl4->flowi4_proto = sk->sk_protocol;

        return ip_route_output_key(net, fl4);
}

static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
        int payload_len = skb->len;
        struct gtp0_header *gtp0;

        gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0));

        gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
        gtp0->type = GTP_TPDU;
        gtp0->length = htons(payload_len);
        gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
        gtp0->flow = htons(pctx->u.v0.flow);
        gtp0->number = 0xff;
        gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff;
        gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
}

static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
        int payload_len = skb->len;
        struct gtp1_header *gtp1;

        gtp1 = (struct gtp1_header *) skb_push(skb, sizeof(*gtp1));

        /* Bits  8  7  6  5  4  3  2  1
         *      +--+--+--+--+--+--+--+--+
         *      |version |PT| 0| E| S|PN|
         *      +--+--+--+--+--+--+--+--+
         *        0  0  1  1  0  0  0  0
         */
        gtp1->flags = 0x30; /* v1, GTP-non-prime. */
        gtp1->type = GTP_TPDU;
        gtp1->length = htons(payload_len);
        gtp1->tid = htonl(pctx->u.v1.o_tei);

        /* TODO: Support for extension header, sequence number and N-PDU.
         *       Update the length field if any of them is available.
         */
}
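
/* In both builders the length field counts only the payload behind the
 * mandatory header: skb->len is sampled before the skb_push(), matching
 * the GTP definition of the Length field for v0 and v1 alike.
 */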
struct gtp_pktinfo {
        struct sock             *sk;
        struct iphdr            *iph;
        struct flowi4           fl4;
        struct rtable           *rt;
        struct pdp_ctx          *pctx;
        struct net_device       *dev;
        __be16                  gtph_port;
};

static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
        switch (pktinfo->pctx->gtp_version) {
        case GTP_V0:
                pktinfo->gtph_port = htons(GTP0_PORT);
                gtp0_push_header(skb, pktinfo->pctx);
                break;
        case GTP_V1:
                pktinfo->gtph_port = htons(GTP1U_PORT);
                gtp1_push_header(skb, pktinfo->pctx);
                break;
        }
}
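
/* The port selected here is one of the well-known GTP ports: 3386 for
 * GTPv0 and 2152 for GTP-U (v1). The same value is later used for both the
 * UDP source and destination port of the outer header.
 */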
static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
                                        struct sock *sk, struct iphdr *iph,
                                        struct pdp_ctx *pctx, struct rtable *rt,
                                        struct flowi4 *fl4,
                                        struct net_device *dev)
{
        pktinfo->sk     = sk;
        pktinfo->iph    = iph;
        pktinfo->pctx   = pctx;
        pktinfo->rt     = rt;
        pktinfo->fl4    = *fl4;
        pktinfo->dev    = dev;
}

static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
                             struct gtp_pktinfo *pktinfo)
{
        struct gtp_dev *gtp = netdev_priv(dev);
        struct pdp_ctx *pctx;
        struct rtable *rt;
        struct flowi4 fl4;
        struct iphdr *iph;
        struct sock *sk;
        __be16 df;
        int mtu;

        /* Read the IP destination address and resolve the PDP context.
         * Prepend PDP header with TEI/TID from PDP ctx.
         */
        iph = ip_hdr(skb);
        pctx = ipv4_pdp_find(gtp, iph->daddr);
        if (!pctx) {
                netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
                           &iph->daddr);
                return -ENOENT;
        }
        netdev_dbg(dev, "found PDP context %p\n", pctx);

        switch (pctx->gtp_version) {
        case GTP_V0:
                if (gtp->sock0)
                        sk = gtp->sock0->sk;
                else
                        sk = NULL;
                break;
        case GTP_V1:
                if (gtp->sock1u)
                        sk = gtp->sock1u->sk;
                else
                        sk = NULL;
                break;
        default:
                return -ENOENT;
        }

        if (!sk) {
                netdev_dbg(dev, "no userspace socket is available, skip\n");
                return -ENOENT;
        }

        /* Route based on the socket that will actually transmit. */
        rt = ip4_route_output_gtp(sock_net(sk), &fl4, sk,
                                  pctx->sgsn_addr_ip4.s_addr);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to SGSN %pI4\n",
                           &pctx->sgsn_addr_ip4.s_addr);
                dev->stats.tx_carrier_errors++;
                goto err;
        }

        if (rt->dst.dev == dev) {
                netdev_dbg(dev, "circular route to SGSN %pI4\n",
                           &pctx->sgsn_addr_ip4.s_addr);
                dev->stats.collisions++;
                goto err_rt;
        }

        skb_dst_drop(skb);

        /* This is similar to tnl_update_pmtu(). */
        df = iph->frag_off;
        if (df) {
                mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
                        sizeof(struct iphdr) - sizeof(struct udphdr);
                switch (pctx->gtp_version) {
                case GTP_V0:
                        mtu -= sizeof(struct gtp0_header);
                        break;
                case GTP_V1:
                        mtu -= sizeof(struct gtp1_header);
                        break;
                }
        } else {
                mtu = dst_mtu(&rt->dst);
        }

        rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);

        if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
            mtu < ntohs(iph->tot_len)) {
                netdev_dbg(dev, "packet too big, fragmentation needed\n");
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(mtu));
                goto err_rt;
        }

        gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
        gtp_push_header(skb, pktinfo);

        return 0;
err_rt:
        ip_rt_put(rt);
err:
        return -EBADMSG;
}
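
/* The MTU handling above mirrors tnl_update_pmtu(): the tunnel MTU is the
 * route MTU minus the outer IP/UDP/GTP overhead, the route's PMTU is
 * updated accordingly, and an oversized DF packet is bounced back with an
 * ICMP fragmentation-needed error instead of being encapsulated.
 */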
static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned int proto = ntohs(skb->protocol);
        struct gtp_pktinfo pktinfo;
        int err;

        /* Ensure there is sufficient headroom. */
        if (skb_cow_head(skb, dev->needed_headroom))
                goto tx_err;

        skb_reset_inner_headers(skb);

        /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
        rcu_read_lock();
        switch (proto) {
        case ETH_P_IP:
                err = gtp_build_skb_ip4(skb, dev, &pktinfo);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }
        rcu_read_unlock();

        if (err < 0)
                goto tx_err;

        switch (proto) {
        case ETH_P_IP:
                netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
                           &pktinfo.iph->saddr, &pktinfo.iph->daddr);
                udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
                                    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
                                    pktinfo.iph->tos,
                                    ip4_dst_hoplimit(&pktinfo.rt->dst),
                                    0,
                                    pktinfo.gtph_port, pktinfo.gtph_port,
                                    true, false);
                break;
        }

        return NETDEV_TX_OK;
tx_err:
        dev->stats.tx_errors++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops gtp_netdev_ops = {
        .ndo_init        = gtp_dev_init,
        .ndo_uninit      = gtp_dev_uninit,
        .ndo_start_xmit  = gtp_dev_xmit,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
};

static void gtp_link_setup(struct net_device *dev)
{
        dev->netdev_ops = &gtp_netdev_ops;
        dev->destructor = free_netdev;

        dev->hard_header_len = 0;
        dev->addr_len = 0;

        /* Zero header length. */
        dev->type = ARPHRD_NONE;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

        dev->priv_flags |= IFF_NO_QUEUE;
        dev->features   |= NETIF_F_LLTX;
        netif_keep_dst(dev);

        /* Assume largest header, i.e. GTPv0. */
        dev->needed_headroom = LL_MAX_HEADER +
                               sizeof(struct iphdr) +
                               sizeof(struct udphdr) +
                               sizeof(struct gtp0_header);
}
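
/* needed_headroom is sized for the worst case: the 20-byte GTPv0 header
 * plus the outer UDP and IPv4 headers plus link-layer space, so a GTPv1
 * transmit (8-byte mandatory header) always fits without reallocating the
 * skb head.
 */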
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
                            int fd_gtp0, int fd_gtp1);

static int gtp_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
{
        int hashsize, err, fd0, fd1;
        struct gtp_dev *gtp;
        struct gtp_net *gn;

        if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
                return -EINVAL;

        gtp = netdev_priv(dev);

        fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
        fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

        err = gtp_encap_enable(dev, gtp, fd0, fd1);
        if (err < 0)
                goto out_err;

        if (!data[IFLA_GTP_PDP_HASHSIZE])
                hashsize = 1024;
        else
                hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);

        err = gtp_hashtable_new(gtp, hashsize);
        if (err < 0)
                goto out_encap;

        err = register_netdevice(dev);
        if (err < 0) {
                netdev_dbg(dev, "failed to register new netdev %d\n", err);
                goto out_hashtable;
        }

        gn = net_generic(dev_net(dev), gtp_net_id);
        list_add_rcu(&gtp->list, &gn->gtp_dev_list);

        netdev_dbg(dev, "registered new GTP interface\n");

        return 0;

out_hashtable:
        gtp_hashtable_free(gtp);
out_encap:
        gtp_encap_disable(gtp);
out_err:
        return err;
}

static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
        struct gtp_dev *gtp = netdev_priv(dev);

        gtp_encap_disable(gtp);
        gtp_hashtable_free(gtp);
        list_del_rcu(&gtp->list);
        unregister_netdevice_queue(dev, head);
}

static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
        [IFLA_GTP_FD0]          = { .type = NLA_U32 },
        [IFLA_GTP_FD1]          = { .type = NLA_U32 },
        [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
};

static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (!data)
                return -EINVAL;

        return 0;
}

static size_t gtp_get_size(const struct net_device *dev)
{
        return nla_total_size(sizeof(__u32)); /* IFLA_GTP_PDP_HASHSIZE */
}

static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct gtp_dev *gtp = netdev_priv(dev);

        if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static struct rtnl_link_ops gtp_link_ops __read_mostly = {
        .kind           = "gtp",
        .maxtype        = IFLA_GTP_MAX,
        .policy         = gtp_policy,
        .priv_size      = sizeof(struct gtp_dev),
        .setup          = gtp_link_setup,
        .validate       = gtp_validate,
        .newlink        = gtp_newlink,
        .dellink        = gtp_dellink,
        .get_size       = gtp_get_size,
        .fill_info      = gtp_fill_info,
};

static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
{
        struct net *net;

        /* Examine the link attributes and figure out which network namespace
         * we are talking about.
         */
        if (tb[GTPA_NET_NS_FD])
                net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
        else
                net = get_net(src_net);

        return net;
}

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
        int i;

        gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
        if (gtp->addr_hash == NULL)
                return -ENOMEM;

        gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
        if (gtp->tid_hash == NULL)
                goto err1;

        gtp->hash_size = hsize;

        for (i = 0; i < hsize; i++) {
                INIT_HLIST_HEAD(&gtp->addr_hash[i]);
                INIT_HLIST_HEAD(&gtp->tid_hash[i]);
        }
        return 0;
err1:
        kfree(gtp->addr_hash);
        return -ENOMEM;
}

static void gtp_hashtable_free(struct gtp_dev *gtp)
{
        struct pdp_ctx *pctx;
        int i;

        for (i = 0; i < gtp->hash_size; i++) {
                hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
                        hlist_del_rcu(&pctx->hlist_tid);
                        hlist_del_rcu(&pctx->hlist_addr);
                        kfree_rcu(pctx, rcu_head);
                }
        }
        synchronize_rcu();
        kfree(gtp->addr_hash);
        kfree(gtp->tid_hash);
}

static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
                            int fd_gtp0, int fd_gtp1)
{
        struct udp_tunnel_sock_cfg tuncfg = {NULL};
        struct socket *sock0, *sock1u;
        int err;

        netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);

        sock0 = sockfd_lookup(fd_gtp0, &err);
        if (sock0 == NULL) {
                netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
                return -ENOENT;
        }

        if (sock0->sk->sk_protocol != IPPROTO_UDP) {
                netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
                err = -EINVAL;
                goto err1;
        }

        sock1u = sockfd_lookup(fd_gtp1, &err);
        if (sock1u == NULL) {
                netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
                err = -ENOENT;
                goto err1;
        }

        if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
                netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
                err = -EINVAL;
                goto err2;
        }

        netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);

        gtp->sock0 = sock0;
        gtp->sock1u = sock1u;

        tuncfg.sk_user_data = gtp;
        tuncfg.encap_rcv = gtp_encap_recv;
        tuncfg.encap_destroy = gtp_encap_destroy;

        tuncfg.encap_type = UDP_ENCAP_GTP0;
        setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);

        tuncfg.encap_type = UDP_ENCAP_GTP1U;
        setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);

        err = 0;
err2:
        sockfd_put(sock1u);
err1:
        sockfd_put(sock0);
        return err;
}
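
/* Note that the success path above deliberately falls through both
 * sockfd_put() calls: the references taken by sockfd_lookup() are dropped
 * again, and the two UDP sockets stay alive only through the userspace
 * file descriptors that were passed in over netlink. The device keeps raw
 * pointers, which gtp_encap_destroy() clears if userspace closes a socket.
 */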
static struct net_device *gtp_find_dev(struct net *net, int ifindex)
{
        struct gtp_net *gn = net_generic(net, gtp_net_id);
        struct gtp_dev *gtp;

        list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
                if (ifindex == gtp->dev->ifindex)
                        return gtp->dev;
        }
        return NULL;
}

static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
        pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
        pctx->af = AF_INET;
        pctx->sgsn_addr_ip4.s_addr =
                nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]);
        pctx->ms_addr_ip4.s_addr =
                nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

        switch (pctx->gtp_version) {
        case GTP_V0:
                /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
                 * label needs to be the same for uplink and downlink packets,
                 * so let's annotate this.
                 */
                pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
                pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
                break;
        case GTP_V1:
                pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
                pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
                break;
        default:
                break;
        }
}

static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
{
        struct gtp_dev *gtp = netdev_priv(dev);
        u32 hash_ms, hash_tid = 0;
        struct pdp_ctx *pctx;
        bool found = false;
        __be32 ms_addr;

        ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
        hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;

        hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
                if (pctx->ms_addr_ip4.s_addr == ms_addr) {
                        found = true;
                        break;
                }
        }

        if (found) {
                if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
                        return -EEXIST;
                if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
                        return -EOPNOTSUPP;

                ipv4_pdp_fill(pctx, info);

                if (pctx->gtp_version == GTP_V0)
                        netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
                                   pctx->u.v0.tid, pctx);
                else if (pctx->gtp_version == GTP_V1)
                        netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
                                   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

                return 0;
        }

        pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
        if (pctx == NULL)
                return -ENOMEM;

        ipv4_pdp_fill(pctx, info);
        atomic_set(&pctx->tx_seq, 0);

        switch (pctx->gtp_version) {
        case GTP_V0:
                /* TS 09.60: "The flow label identifies unambiguously a GTP
                 * flow.". We use the tid for this instead; I cannot find a
                 * situation in which this doesn't unambiguously identify the
                 * PDP context.
                 */
                hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
                break;
        case GTP_V1:
                hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
                break;
        }

        hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
        hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

        switch (pctx->gtp_version) {
        case GTP_V0:
                netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
                           pctx->u.v0.tid, &pctx->sgsn_addr_ip4,
                           &pctx->ms_addr_ip4, pctx);
                break;
        case GTP_V1:
                netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
                           pctx->u.v1.i_tei, pctx->u.v1.o_tei,
                           &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx);
                break;
        }

        return 0;
}
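
/* ipv4_pdp_add() therefore behaves like a netlink "create or update": an
 * existing context for the MS address is updated in place unless the
 * request carries NLM_F_EXCL (-EEXIST), while NLM_F_REPLACE is explicitly
 * unsupported.
 */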
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
        struct net_device *dev;
        struct net *net;

        if (!info->attrs[GTPA_VERSION] ||
            !info->attrs[GTPA_LINK] ||
            !info->attrs[GTPA_SGSN_ADDRESS] ||
            !info->attrs[GTPA_MS_ADDRESS])
                return -EINVAL;

        switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
        case GTP_V0:
                if (!info->attrs[GTPA_TID] ||
                    !info->attrs[GTPA_FLOW])
                        return -EINVAL;
                break;
        case GTP_V1:
                if (!info->attrs[GTPA_I_TEI] ||
                    !info->attrs[GTPA_O_TEI])
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
        if (IS_ERR(net))
                return PTR_ERR(net);

        /* Check if there's an existing gtpX device to configure */
        dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
        if (dev == NULL) {
                put_net(net);
                return -ENODEV;
        }
        put_net(net);

        return ipv4_pdp_add(dev, info);
}

static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
        struct net_device *dev;
        struct pdp_ctx *pctx;
        struct gtp_dev *gtp;
        struct net *net;

        if (!info->attrs[GTPA_VERSION] ||
            !info->attrs[GTPA_LINK])
                return -EINVAL;

        net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
        if (IS_ERR(net))
                return PTR_ERR(net);

        /* Check if there's an existing gtpX device to configure */
        dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
        if (dev == NULL) {
                put_net(net);
                return -ENODEV;
        }
        put_net(net);

        gtp = netdev_priv(dev);

        switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
        case GTP_V0:
                if (!info->attrs[GTPA_TID])
                        return -EINVAL;
                pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
                break;
        case GTP_V1:
                if (!info->attrs[GTPA_I_TEI])
                        return -EINVAL;
                /* GTPA_I_TEI is declared NLA_U32, so fetch it as u32. */
                pctx = gtp1_pdp_find(gtp, nla_get_u32(info->attrs[GTPA_I_TEI]));
                break;
        default:
                return -EINVAL;
        }

        if (pctx == NULL)
                return -ENOENT;

        if (pctx->gtp_version == GTP_V0)
                netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
                           pctx->u.v0.tid, pctx);
        else if (pctx->gtp_version == GTP_V1)
                netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
                           pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

        hlist_del_rcu(&pctx->hlist_tid);
        hlist_del_rcu(&pctx->hlist_addr);
        kfree_rcu(pctx, rcu_head);

        return 0;
}
static struct genl_family gtp_genl_family;

static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
                              u32 type, struct pdp_ctx *pctx)
{
        void *genlh;

        genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
                            type);
        if (genlh == NULL)
                goto nlmsg_failure;

        if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
            nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) ||
            nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
                goto nla_put_failure;

        switch (pctx->gtp_version) {
        case GTP_V0:
                if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
                    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
                        goto nla_put_failure;
                break;
        case GTP_V1:
                if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
                    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
                        goto nla_put_failure;
                break;
        }
        genlmsg_end(skb, genlh);
        return 0;

nlmsg_failure:
nla_put_failure:
        genlmsg_cancel(skb, genlh);
        return -EMSGSIZE;
}

static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
        struct pdp_ctx *pctx = NULL;
        struct net_device *dev;
        struct sk_buff *skb2;
        struct gtp_dev *gtp;
        u32 gtp_version;
        struct net *net;
        int err;

        if (!info->attrs[GTPA_VERSION] ||
            !info->attrs[GTPA_LINK])
                return -EINVAL;

        gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
        switch (gtp_version) {
        case GTP_V0:
        case GTP_V1:
                break;
        default:
                return -EINVAL;
        }

        net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
        if (IS_ERR(net))
                return PTR_ERR(net);

        /* Check if there's an existing gtpX device to configure */
        dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
        if (dev == NULL) {
                put_net(net);
                return -ENODEV;
        }
        put_net(net);

        gtp = netdev_priv(dev);

        rcu_read_lock();
        if (gtp_version == GTP_V0 &&
            info->attrs[GTPA_TID]) {
                u64 tid = nla_get_u64(info->attrs[GTPA_TID]);

                pctx = gtp0_pdp_find(gtp, tid);
        } else if (gtp_version == GTP_V1 &&
                   info->attrs[GTPA_I_TEI]) {
                u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);

                pctx = gtp1_pdp_find(gtp, tid);
        } else if (info->attrs[GTPA_MS_ADDRESS]) {
                __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

                pctx = ipv4_pdp_find(gtp, ip);
        }

        if (pctx == NULL) {
                err = -ENOENT;
                goto err_unlock;
        }

        skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (skb2 == NULL) {
                err = -ENOMEM;
                goto err_unlock;
        }

        err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
                                 info->snd_seq, info->nlhdr->nlmsg_type, pctx);
        if (err < 0)
                goto err_unlock_free;

        rcu_read_unlock();
        return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
        kfree_skb(skb2);
err_unlock:
        rcu_read_unlock();
        return err;
}

static int gtp_genl_dump_pdp(struct sk_buff *skb,
                             struct netlink_callback *cb)
{
        struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
        struct net *net = sock_net(skb->sk);
        struct gtp_net *gn = net_generic(net, gtp_net_id);
        unsigned long tid = cb->args[1];
        int i, k = cb->args[0], ret;
        struct pdp_ctx *pctx;

        if (cb->args[4])
                return 0;

        list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
                if (last_gtp && last_gtp != gtp)
                        continue;
                else
                        last_gtp = NULL;

                for (i = k; i < gtp->hash_size; i++) {
                        hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
                                if (tid && tid != pctx->u.tid)
                                        continue;
                                else
                                        tid = 0;

                                ret = gtp_genl_fill_info(skb,
                                                         NETLINK_CB(cb->skb).portid,
                                                         cb->nlh->nlmsg_seq,
                                                         cb->nlh->nlmsg_type, pctx);
                                if (ret < 0) {
                                        cb->args[0] = i;
                                        cb->args[1] = pctx->u.tid;
                                        cb->args[2] = (unsigned long)gtp;
                                        goto out;
                                }
                        }
                }
        }
        cb->args[4] = 1;
out:
        return skb->len;
}
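
/* Dump state is carried across calls in the netlink callback: cb->args[0]
 * holds the next hash bucket, cb->args[1] the tid of the last context that
 * did not fit into the message, cb->args[2] the device being walked and
 * cb->args[4] flags a completed dump.
 */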
static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
        [GTPA_LINK]             = { .type = NLA_U32, },
        [GTPA_VERSION]          = { .type = NLA_U32, },
        [GTPA_TID]              = { .type = NLA_U64, },
        [GTPA_SGSN_ADDRESS]     = { .type = NLA_U32, },
        [GTPA_MS_ADDRESS]       = { .type = NLA_U32, },
        [GTPA_FLOW]             = { .type = NLA_U16, },
        [GTPA_NET_NS_FD]        = { .type = NLA_U32, },
        [GTPA_I_TEI]            = { .type = NLA_U32, },
        [GTPA_O_TEI]            = { .type = NLA_U32, },
};

static const struct genl_ops gtp_genl_ops[] = {
        {
                .cmd = GTP_CMD_NEWPDP,
                .doit = gtp_genl_new_pdp,
                .policy = gtp_genl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = GTP_CMD_DELPDP,
                .doit = gtp_genl_del_pdp,
                .policy = gtp_genl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = GTP_CMD_GETPDP,
                .doit = gtp_genl_get_pdp,
                .dumpit = gtp_genl_dump_pdp,
                .policy = gtp_genl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};

static struct genl_family gtp_genl_family __ro_after_init = {
        .name           = "gtp",
        .version        = 0,
        .hdrsize        = 0,
        .maxattr        = GTPA_MAX,
        .netnsok        = true,
        .module         = THIS_MODULE,
        .ops            = gtp_genl_ops,
        .n_ops          = ARRAY_SIZE(gtp_genl_ops),
};

static int __net_init gtp_net_init(struct net *net)
{
        struct gtp_net *gn = net_generic(net, gtp_net_id);

        INIT_LIST_HEAD(&gn->gtp_dev_list);
        return 0;
}

static void __net_exit gtp_net_exit(struct net *net)
{
        struct gtp_net *gn = net_generic(net, gtp_net_id);
        struct gtp_dev *gtp;
        LIST_HEAD(list);

        rtnl_lock();
        list_for_each_entry(gtp, &gn->gtp_dev_list, list)
                gtp_dellink(gtp->dev, &list);

        unregister_netdevice_many(&list);
        rtnl_unlock();
}

static struct pernet_operations gtp_net_ops = {
        .init   = gtp_net_init,
        .exit   = gtp_net_exit,
        .id     = &gtp_net_id,
        .size   = sizeof(struct gtp_net),
};

static int __init gtp_init(void)
{
        int err;

        get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

        err = rtnl_link_register(&gtp_link_ops);
        if (err < 0)
                goto error_out;

        err = genl_register_family(&gtp_genl_family);
        if (err < 0)
                goto unreg_rtnl_link;

        err = register_pernet_subsys(&gtp_net_ops);
        if (err < 0)
                goto unreg_genl_family;

        pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
                sizeof(struct pdp_ctx));
        return 0;

unreg_genl_family:
        genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
        rtnl_link_unregister(&gtp_link_ops);
error_out:
        pr_err("error loading GTP module\n");
        return err;
}
late_initcall(gtp_init);
static void __exit gtp_fini(void)
{
        unregister_pernet_subsys(&gtp_net_ops);
        genl_unregister_family(&gtp_genl_family);
        rtnl_link_unregister(&gtp_link_ops);

        pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");