netpoll.c

/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
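
/*
 * Deferred transmit worker: drain the retransmit queue.  Frames for a
 * device that has gone away or been stopped are dropped; if the device
 * is busy, the frame is put back at the head of the queue and the work
 * is rescheduled a tenth of a second later, so ordering is preserved.
 */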
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		netif_tx_lock_bh(dev);
		if (netif_queue_stopped(dev) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock_bh(dev);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock_bh(dev);
	}
}
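
/*
 * Verify the UDP checksum of a received frame.  Returns 0 when the
 * checksum is absent or already verified by hardware, or when the
 * pseudo-header sum folds to zero against a hardware-computed total;
 * otherwise falls back to a full software verification.
 */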
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static void poll_napi(struct netpoll *np)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	int budget = 16;

	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
	    npinfo->poll_owner != smp_processor_id() &&
	    spin_trylock(&npinfo->poll_lock)) {
		npinfo->rx_flags |= NETPOLL_RX_DROP;
		atomic_inc(&trapped);

		np->dev->poll(np->dev, &budget);

		atomic_dec(&trapped);
		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
		spin_unlock(&npinfo->poll_lock);
	}
}
static void service_arp_queue(struct netpoll_info *npi)
{
	struct sk_buff *skb;

	if (unlikely(!npi))
		return;

	skb = skb_dequeue(&npi->arp_tx);

	while (skb != NULL) {
		arp_reply(skb);
		skb = skb_dequeue(&npi->arp_tx);
	}
}
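
/*
 * Pump the device by hand: run the driver's poll_controller to pick up
 * pending work, run the NAPI ->poll() if the driver has one, answer
 * any queued ARP requests, and reap the tx completion queue.
 */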
void netpoll_poll(struct netpoll *np)
{
	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);
	if (np->dev->poll)
		poll_napi(np);

	service_arp_queue(np->dev->npinfo);

	zap_completion_queue();
}
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
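
/*
 * Free the skbs parked on this CPU's softnet completion queue so their
 * memory can be reused.  Plain skbs are freed directly; anything with
 * a destructor is handed back through dev_kfree_skb_any().
 */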
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor)
				dev_kfree_skb_any(skb); /* put this one back */
			else
				__kfree_skb(skb);
		}
	}

	put_cpu_var(softnet_data);
}
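
/*
 * Get an skb for transmit: try a fresh atomic allocation first, then
 * fall back to the preallocated pool.  If both fail, poll the device
 * (up to ten times) in the hope of freeing memory before giving up.
 */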
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
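
/*
 * Try to transmit immediately, spinning for up to one clock tick while
 * polling the device between attempts.  Frames that cannot be sent, or
 * that would go out of order or recurse from the poll path, are queued
 * for queue_process() instead.
 */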
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 &&
	    npinfo->poll_owner != smp_processor_id()) {
		local_bh_disable();	/* Where's netif_tx_trylock_bh()? */
		if (netif_tx_trylock(dev)) {
			/* try until next clock tick */
			for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
			     tries > 0; --tries) {
				if (!netif_queue_stopped(dev))
					status = dev->hard_start_xmit(skb, dev);

				if (status == NETDEV_TX_OK)
					break;

				/* tickle device maybe there is some cleanup */
				netpoll_poll(np);

				udelay(USEC_PER_POLL);
			}
			netif_tx_unlock(dev);
		}
		local_bh_enable();
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	memcpy(skb->data, msg, len);
	skb->len += len;

	skb->h.uh = udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
					htonl(np->remote_ip),
					udp_len, IPPROTO_UDP,
					csum_partial((unsigned char *)udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb->mac.raw = skb->data;
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->local_mac, 6);
	memcpy(eth->h_dest, np->remote_mac, 6);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
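
/*
 * A minimal usage sketch (illustrative only -- the field values and
 * interface name below are made up): a client such as netconsole fills
 * a struct netpoll, binds it with netpoll_setup(), and may then call
 * netpoll_send_udp() from almost any context:
 *
 *	static struct netpoll np = {
 *		.name        = "example",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	np.local_ip  = ntohl(in_aton("10.0.0.1"));
 *	np.remote_ip = ntohl(in_aton("10.0.0.2"));
 *	if (netpoll_setup(&np) == 0)
 *		netpoll_send_udp(&np, "hello\n", 6);
 */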
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb->h.raw = skb->nh.raw = skb->data;
	arp = skb->nh.arph;

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	send_skb->nh.raw = send_skb->data;
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (np->dev->hard_header &&
	    np->dev->hard_header(send_skb, skb->dev, ptype,
				 sha, np->local_mac,
				 send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}
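
/*
 * Called from the rx path: returns 1 if netpoll consumed the packet
 * (a trapped ARP request, a UDP frame delivered to the client's
 * rx_hook, or anything dropped while trapped), 0 to hand it back to
 * the normal network stack.
 */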
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == __constant_htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
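
/*
 * Parse a configuration string of the form
 *
 *	[local_port]@[local_ip]/[dev_name],[remote_port]@<remote_ip>/[remote_mac]
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55" (the
 * address values here are only examples).  Any of the bracketed
 * elements may be left empty to keep the value already in *np.
 */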
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;

		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	printk(KERN_INFO "%s: remote ethernet address "
	       "%02x:%02x:%02x:%02x:%02x:%02x\n",
	       np->name,
	       np->remote_mac[0],
	       np->remote_mac[1],
	       np->remote_mac[2],
	       np->remote_mac[3],
	       np->remote_mac[4],
	       np->remote_mac[5]);

	return 0;

parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;
		spin_lock_init(&npinfo->poll_lock);
		npinfo->poll_owner = -1;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
		memcpy(np->local_mac, ndev->dev_addr, 6);

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
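
/*
 * Undo netpoll_setup(): unhook the rx path and drop our reference on
 * the shared npinfo; the last user purges any queued skbs and frees it.
 */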
void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			np->dev->npinfo = NULL;
			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);
				flush_scheduled_work();

				kfree(npinfo);
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);