netpoll.c

/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003 Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
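
/* Transmit one skb on the given queue, inserting the VLAN tag in
 * software first when the device cannot offload it in hardware.
 */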
static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
			      struct netdev_queue *txq)
{
	int status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (vlan_tx_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_put_tag(skb, skb->vlan_proto,
				     vlan_tx_tag_get(skb));
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
		skb->vlan_tci = 0;
	}

	status = netdev_start_xmit(skb, dev);
	if (status == NETDEV_TX_OK)
		txq_trans_update(txq);

out:
	return status;
}
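
/* Work handler that drains the deferred transmit queue. If the txq is
 * frozen/stopped or the driver rejects the skb, the packet is requeued
 * at the head and the work is rescheduled, preserving message order.
 */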
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		txq = skb_get_tx_queue(dev, skb);

		local_irq_save(flags);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 */
static int poll_one_napi(struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);

	return budget - work;
}

static void poll_napi(struct net_device *dev, int budget)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(napi, budget);
			spin_unlock(&napi->poll_lock);
		}
	}
}
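
/* Pump the device via ->ndo_poll_controller() and service its NAPI
 * contexts, so rx/tx completions make progress while interrupts are
 * unavailable; skipped entirely while dev_lock is held.
 */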
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	int budget = 0;

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev, budget);

	up(&ni->dev_lock);

	zap_completion_queue();
}
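
/* Block netpoll activity across a device state change; paired with
 * netpoll_poll_enable(). The SRCU read section keeps npinfo valid
 * while we may sleep on dev_lock.
 */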
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
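
/* Top up the emergency pool to MAX_SKBS preallocated, fully-sized
 * skbs, so messages can still go out under memory pressure.
 */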
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
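
/* Drain this CPU's tx completion queue, since the softirq that
 * normally frees it may not get a chance to run: skbs that are safe
 * to free from hard-irq context are freed here, the rest are pushed
 * back onto the completion queue.
 */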
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
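
/* Allocate an skb of @len bytes with @reserve bytes of headroom,
 * falling back to the preallocated pool and repeatedly polling the
 * device to reclaim buffers before giving up.
 */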
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device; maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
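
/* Hand-build the UDP, IPv4/IPv6, and Ethernet headers around @msg and
 * transmit the frame through netpoll_send_skb(), bypassing the normal
 * protocol stack entirely.
 */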
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
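
/* Parse @str as an IP address into @addr. Returns 0 for IPv4, 1 for
 * IPv6 (when CONFIG_IPV6 is enabled), and -1 on failure.
 */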
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
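
/* Parse a netconsole-style configuration string of the form
 * local_port@local_ip/dev_name,remote_port@remote_ip/remote_mac,
 * where every field except the remote IP may be left empty to keep
 * its current value.
 */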
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
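
/* Attach netpoll state to @ndev: allocate a fresh netpoll_info (or
 * take a reference on the device's existing one), give the driver a
 * chance to prepare via ->ndo_netpoll_setup(), and finally publish
 * the result through ndev->npinfo. Expects the rtnl lock to be held.
 */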
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
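
/* Full setup path: look up the interface by name, force it up if
 * needed and wait (up to carrier_timeout seconds) for carrier, pick
 * a local IP address when none was configured, then attach via
 * __netpoll_setup().
 */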
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
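
/* Final stage of netpoll_info teardown, run as an RCU callback (i.e.
 * softirq context, hence no *_sync variants below): purge queued tx
 * skbs and cancel the tx work before freeing.
 */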
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
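
/* Deferred-cleanup worker behind __netpoll_free_async(): runs
 * __netpoll_cleanup() under the rtnl lock and then frees the netpoll
 * struct itself.
 */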
static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);