/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

#ifdef CONFIG_NETPOLL_TRAP
static atomic_t trapped;
static void netpoll_neigh_reply(struct sk_buff *skb,
				struct netpoll_info *npinfo);
#endif

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE						\
	(sizeof(struct ethhdr) +				\
	 sizeof(struct iphdr) +					\
	 sizeof(struct udphdr) +				\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
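
/*
 * queue_process() drains npinfo->txq from a workqueue: packets that could
 * not be sent directly by netpoll are retried here under the tx queue lock.
 * If the queue is frozen/stopped or the driver refuses the skb, the packet
 * is requeued and the work is rescheduled roughly HZ/10 later.
 */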
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_NETPOLL_TRAP
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}
#endif /* CONFIG_NETPOLL_TRAP */

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);

	return budget - work;
}

static void poll_napi(struct net_device *dev, int budget)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(napi, budget);
			spin_unlock(&napi->poll_lock);
		}
	}
}

#ifdef CONFIG_NETPOLL_TRAP
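
/*
 * Forward queued neighbour (ARP/NS) traffic. For a bonding slave the
 * queued skbs are moved to the master's netpoll_info so replies go out
 * through the bond; otherwise they are answered directly via
 * netpoll_neigh_reply().
 */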
static void service_neigh_queue(struct net_device *dev,
				struct netpoll_info *npi)
{
	struct sk_buff *skb;

	if (dev->flags & IFF_SLAVE) {
		struct net_device *bond_dev;
		struct netpoll_info *bond_ni;

		bond_dev = netdev_master_upper_dev_get_rcu(dev);
		bond_ni = rcu_dereference_bh(bond_dev->npinfo);

		while ((skb = skb_dequeue(&npi->neigh_tx))) {
			skb->dev = bond_dev;
			skb_queue_tail(&bond_ni->neigh_tx, skb);
		}
	}

	while ((skb = skb_dequeue(&npi->neigh_tx)))
		netpoll_neigh_reply(skb, npi);
}
#else /* !CONFIG_NETPOLL_TRAP */
static inline void service_neigh_queue(struct net_device *dev,
				       struct netpoll_info *npi)
{
}
#endif /* CONFIG_NETPOLL_TRAP */
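
/*
 * netpoll_poll_dev() is the core polling entry point: it asks the driver
 * to run its ->ndo_poll_controller() callback and then services the
 * device's NAPI contexts with a small budget (16 when rx trapping is
 * active, 0 otherwise), all while holding the npinfo dev_lock to keep
 * dev_open()/dev_close() from racing with us.
 */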
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	bool rx_processing = netpoll_rx_processing(ni);
	int budget = rx_processing ? 16 : 0;

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	if (rx_processing)
		netpoll_set_trap(1);

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev, budget);

	if (rx_processing)
		netpoll_set_trap(0);

	up(&ni->dev_lock);

	service_neigh_queue(dev, ni);

	zap_completion_queue();
}
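
/*
 * netpoll_rx_disable()/netpoll_rx_enable() are used by the dev_open and
 * dev_close paths: taking dev_lock here makes netpoll_poll_dev() bail out
 * (its down_trylock() fails) for as long as the device state is in flux.
 * The SRCU read lock pins npinfo while we sleep on the semaphore.
 */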
void netpoll_rx_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_rx_disable);

void netpoll_rx_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_rx_enable);
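
/*
 * Keep the emergency skb_pool topped up to MAX_SKBS preallocated
 * MAX_SKB_SIZE buffers so messages can still be sent when regular
 * allocations fail under memory pressure.
 */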
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
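
/*
 * Free any skbs sitting on this CPU's softnet completion queue. skbs with
 * a destructor are handed back to dev_kfree_skb_any() (after taking an
 * extra reference) so their destructors run in a safe context.
 */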
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
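
/*
 * Allocate an skb for transmission, falling back to the preallocated pool
 * and, as a last resort, polling the device a few times to flush
 * completions before giving up.
 */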
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !vlan_hw_offload_capable(netif_skb_features(skb),
								     skb->vlan_proto)) {
						skb = __vlan_put_tag(skb, skb->vlan_proto,
								     vlan_tx_tag_get(skb));
						if (unlikely(!skb)) {
							/* This is actually a packet drop, but we
							 * don't want the code at the end of this
							 * function to try and re-queue a NULL skb.
							 */
							status = NETDEV_TX_OK;
							goto unlock_txq;
						}
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
unlock_txq:
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device, maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
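
/*
 * netpoll_send_udp() builds a complete UDP datagram by hand (payload, UDP
 * header, IPv4 or IPv6 header and Ethernet header) and hands it to
 * netpoll_send_skb(). No routing or neighbour lookup is done; the remote
 * MAC configured on the netpoll instance is used directly.
 */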
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

#ifdef CONFIG_NETPOLL_TRAP
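
/*
 * Answer ARP requests (IPv4) and neighbour solicitations (IPv6) that were
 * queued on neigh_tx while rx trapping was active, so the peer can still
 * resolve our address while netpoll is consuming rx traffic.
 */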
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int size, type = ARPOP_REPLY;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0, proto;

	if (!netpoll_rx_processing(npinfo))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto == ETH_P_ARP) {
		struct arphdr *arp;
		unsigned char *arp_ptr;

		/* No arp on this interface */
		if (skb->dev->flags & IFF_NOARP)
			return;

		if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
			return;

		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		arp = arp_hdr(skb);

		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP) ||
		    arp->ar_op != htons(ARPOP_REQUEST))
			return;

		arp_ptr = (unsigned char *)(arp+1);
		/* save the location of the src hw addr */
		sha = arp_ptr;
		arp_ptr += skb->dev->addr_len;
		memcpy(&sip, arp_ptr, 4);
		arp_ptr += 4;
		/* If we actually cared about dst hw addr,
		   it would get copied here */
		arp_ptr += skb->dev->addr_len;
		memcpy(&tip, arp_ptr, 4);

		/* Should we ignore arp? */
		if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
			return;

		size = arp_hdr_len(skb->dev);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (tip != np->local_ip.ip)
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			skb_reset_network_header(send_skb);
			arp = (struct arphdr *) skb_put(send_skb, size);
			send_skb->dev = skb->dev;
			send_skb->protocol = htons(ETH_P_ARP);

			/* Fill the device header for the ARP frame */
			if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
					    sha, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			/*
			 * Fill out the arp protocol part.
			 *
			 * we only support ethernet device type,
			 * which (according to RFC 1390) should
			 * always equal 1 (Ethernet).
			 */

			arp->ar_hrd = htons(np->dev->type);
			arp->ar_pro = htons(ETH_P_IP);
			arp->ar_hln = np->dev->addr_len;
			arp->ar_pln = 4;
			arp->ar_op = htons(type);

			arp_ptr = (unsigned char *)(arp + 1);
			memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &tip, 4);
			arp_ptr += 4;
			memcpy(arp_ptr, sha, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &sip, 4);

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_skb_hooks for the same
			 * address we're fine by sending a single reply
			 */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	} else if (proto == ETH_P_IPV6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct nd_msg *msg;
		u8 *lladdr = NULL;
		struct ipv6hdr *hdr;
		struct icmp6hdr *icmp6h;
		const struct in6_addr *saddr;
		const struct in6_addr *daddr;
		struct inet6_dev *in6_dev = NULL;
		struct in6_addr *target;

		in6_dev = in6_dev_get(skb->dev);
		if (!in6_dev || !in6_dev->cnf.accept_ra)
			return;

		if (!pskb_may_pull(skb, skb->len))
			return;

		msg = (struct nd_msg *)skb_transport_header(skb);

		__skb_push(skb, skb->data - skb_transport_header(skb));

		if (ipv6_hdr(skb)->hop_limit != 255)
			return;
		if (msg->icmph.icmp6_code != 0)
			return;
		if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
			return;

		saddr = &ipv6_hdr(skb)->saddr;
		daddr = &ipv6_hdr(skb)->daddr;

		size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			send_skb->protocol = htons(ETH_P_IPV6);
			send_skb->dev = skb->dev;

			skb_reset_network_header(send_skb);
			hdr = (struct ipv6hdr *) skb_put(send_skb, sizeof(struct ipv6hdr));
			*(__be32 *)hdr = htonl(0x60000000);
			hdr->payload_len = htons(size);
			hdr->nexthdr = IPPROTO_ICMPV6;
			hdr->hop_limit = 255;
			hdr->saddr = *saddr;
			hdr->daddr = *daddr;

			icmp6h = (struct icmp6hdr *) skb_put(send_skb, sizeof(struct icmp6hdr));
			icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
			icmp6h->icmp6_router = 0;
			icmp6h->icmp6_solicited = 1;

			target = (struct in6_addr *) skb_put(send_skb, sizeof(struct in6_addr));
			*target = msg->target;
			icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
							      IPPROTO_ICMPV6,
							      csum_partial(icmp6h,
									   size, 0));

			if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
					    lladdr, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_skb_hooks for the same
			 * address, we're fine by sending a single reply
			 */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
#endif
	}
}
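
/*
 * Basic sanity check for an IPv6 neighbour solicitation: right ethertype,
 * enough data, ICMPv6 next header, hop limit 255, and an ICMPv6 type/code
 * matching a neighbour solicitation.
 */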
static bool pkt_is_ns(struct sk_buff *skb)
{
	struct nd_msg *msg;
	struct ipv6hdr *hdr;

	if (skb->protocol != htons(ETH_P_IPV6))
		return false;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
		return false;

	msg = (struct nd_msg *)skb_transport_header(skb);
	__skb_push(skb, skb->data - skb_transport_header(skb));
	hdr = ipv6_hdr(skb);

	if (hdr->nexthdr != IPPROTO_ICMPV6)
		return false;
	if (hdr->hop_limit != 255)
		return false;
	if (msg->icmph.icmp6_code != 0)
		return false;
	if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		return false;

	return true;
}
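
/*
 * __netpoll_rx() is the receive-side hook: when trapping is enabled it
 * steals ARP/NS frames into the neigh_tx queue and delivers matching UDP
 * datagrams to the registered rx_skb_hook. Returns 1 when the packet has
 * been consumed by netpoll, 0 to let the normal stack process it.
 */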
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen, data_len;
	int hits = 0, offset;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;
	uint16_t source;

	if (!netpoll_rx_processing(npinfo))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) && netpoll_trap()) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	} else if (pkt_is_ns(skb) && netpoll_trap()) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP && proto != ETH_P_IPV6)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (proto == ETH_P_IP) {
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (iph->ihl < 5 || iph->version != 4)
			goto out;
		if (!pskb_may_pull(skb, iph->ihl*4))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
			goto out;

		len = ntohs(iph->tot_len);
		if (skb->len < len || len < iph->ihl*4)
			goto out;

		/*
		 * Our transport medium may have padded the buffer out.
		 * Now we trim to the true length of the frame.
		 */
		if (pskb_trim_rcsum(skb, len))
			goto out;

		iph = (struct iphdr *)skb->data;
		if (iph->protocol != IPPROTO_UDP)
			goto out;

		len -= iph->ihl*4;
		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
		offset = (unsigned char *)(uh + 1) - skb->data;
		ulen = ntohs(uh->len);
		data_len = skb->len - offset;
		source = ntohs(uh->source);

		if (ulen != len)
			goto out;
		if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
			goto out;

		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
				continue;
			if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_skb_hook(np, source, skb, offset, data_len);
			hits++;
		}
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		const struct ipv6hdr *ip6h;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto out;
		ip6h = (struct ipv6hdr *)skb->data;
		if (ip6h->version != 6)
			goto out;
		len = ntohs(ip6h->payload_len);
		if (!len)
			goto out;
		if (len + sizeof(struct ipv6hdr) > skb->len)
			goto out;
		if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
			goto out;
		ip6h = ipv6_hdr(skb);
		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;
		uh = udp_hdr(skb);
		offset = (unsigned char *)(uh + 1) - skb->data;
		ulen = ntohs(uh->len);
		data_len = skb->len - offset;
		source = ntohs(uh->source);
		if (ulen != skb->len)
			goto out;
		if (udp6_csum_init(skb, uh, IPPROTO_UDP))
			goto out;

		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
				continue;
			if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_skb_hook(np, source, skb, offset, data_len);
			hits++;
		}
#endif
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (netpoll_trap()) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
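
/*
 * Helpers that exist only when CONFIG_NETPOLL_TRAP is enabled: they manage
 * the per-device rx_np list, the rx_lock protecting it and the neigh_tx
 * queue. The !CONFIG_NETPOLL_TRAP stubs below let the setup/cleanup paths
 * stay unconditional.
 */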
static void netpoll_trap_setup_info(struct netpoll_info *npinfo)
{
	INIT_LIST_HEAD(&npinfo->rx_np);
	spin_lock_init(&npinfo->rx_lock);
	skb_queue_head_init(&npinfo->neigh_tx);
}

static void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
{
	skb_queue_purge(&npinfo->neigh_tx);
}

static void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
{
	unsigned long flags;

	if (np->rx_skb_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}
}

static void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
{
	unsigned long flags;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}
}

#else /* !CONFIG_NETPOLL_TRAP */

static inline void netpoll_trap_setup_info(struct netpoll_info *npinfo)
{
}

static inline void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
{
}

static inline
void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
{
}

static inline
void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
{
}

#endif /* CONFIG_NETPOLL_TRAP */

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
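
/*
 * Parse an IPv4 or IPv6 address. Returns 0 for IPv4, 1 for IPv6 (when IPv6
 * support is built in), and -1 on failure, so the caller can record which
 * address family was configured.
 */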
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
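
/*
 * Parse a netpoll configuration string of the form
 *	[local_port]@[local_ip]/[dev_name],[remote_port]@[remote_ip]/[remote_mac]
 * (for example "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55" with
 * illustrative values), where any field may be left empty to keep its
 * current setting. Returns 0 on success and -1 on any parse error.
 */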
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
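
/*
 * __netpoll_setup() attaches a netpoll instance to a device that the caller
 * has already looked up under rtnl: it allocates or reuses the device's
 * netpoll_info, gives the driver a chance to prepare via
 * ->ndo_netpoll_setup(), and finally publishes npinfo with
 * rcu_assign_pointer().
 */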
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		netpoll_trap_setup_info(npinfo);

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	netpoll_trap_setup(np, npinfo);

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
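
/*
 * netpoll_setup() is the top-level entry point used by netconsole and
 * friends: it resolves the device by name, brings it up and waits for
 * carrier if necessary, picks a local IP address when none was given,
 * pre-fills the skb pool and then calls __netpoll_setup().
 */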
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);
		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported on %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
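
/*
 * Final teardown of a netpoll_info, run from an RCU callback (softirq
 * context), which is why the delayed tx work can only be cancelled without
 * syncing; the purge/cancel sequence below cleans up any work that sneaks
 * in between the two steps.
 */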
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
		container_of(rcu_head, struct netpoll_info, rcu);

	netpoll_trap_cleanup_info(npinfo);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}
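
/*
 * __netpoll_cleanup() detaches a netpoll instance from its device. The
 * synchronize_srcu() pairs with the read side in netpoll_rx_disable(); the
 * last reference drops npinfo via call_rcu_bh() so concurrent
 * netpoll_poll_dev() callers finish before it is freed.
 */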
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	netpoll_trap_cleanup(np, npinfo);

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);

#ifdef CONFIG_NETPOLL_TRAP
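
/*
 * 'trapped' is a simple nesting counter: while it is non-zero,
 * __netpoll_rx() consumes packets instead of handing them to the stack.
 */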
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);
#endif /* CONFIG_NETPOLL_TRAP */