/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
/*
 * Protocol-agnostic endpoint address: the IPv4 and IPv6 views overlay
 * the same storage, and all[4] spans the full 16 bytes so the union can
 * be zeroed or compared generically regardless of address family.
 */
union inet_addr {
	__u32 all[4];		/* raw view covering the whole union */
	__be32 ip;		/* IPv4 address, network byte order */
	__be32 ip6[4];		/* IPv6 address as four big-endian words */
	struct in_addr in;	/* IPv4 view as the standard struct */
	struct in6_addr in6;	/* IPv6 view as the standard struct */
};
/*
 * Per-client netpoll instance: one per registered low-level network
 * user (netconsole, kgdb-over-ethernet, netdump, ...).
 */
struct netpoll {
	struct net_device *dev;		/* device this instance is bound to */
	char dev_name[IFNAMSIZ];	/* device name used to find @dev at setup */
	const char *name;		/* client name, for diagnostics */
	/*
	 * Optional receive callback; instances that register one are kept
	 * on netpoll_info::rx_np (see the rx list element below).
	 */
	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
			    int offset, int len);

	union inet_addr local_ip, remote_ip;	/* tunnel endpoints (v4 or v6) */
	bool ipv6;				/* true when the ip6 views are in use */
	u16 local_port, remote_port;		/* UDP port pair */
	u8 remote_mac[ETH_ALEN];		/* peer's MAC address */

	struct list_head rx; /* rx_np list element */
	struct work_struct cleanup_work;	/* deferred teardown context */
};
/*
 * Per-net_device netpoll state, attached via net_device::npinfo and
 * shared by all netpoll instances bound to that device.  Freed via RCU
 * (see @rcu) once @refcnt drops to zero.
 */
struct netpoll_info {
	atomic_t refcnt;		/* number of attached netpoll instances */
	spinlock_t rx_lock;		/* protects rx_np and RX dispatch */
	struct semaphore dev_lock;	/* serializes device-level enable/disable */
	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
	struct sk_buff_head txq;	/* packets queued for deferred transmit */
	struct delayed_work tx_work;	/* worker that drains @txq */
	struct netpoll *netpoll;	/* primary instance, if any */
	struct rcu_head rcu;		/* deferred free of this structure */
};
#ifdef CONFIG_NETPOLL
/* Pause/resume netpoll RX processing on @dev (e.g. around reconfiguration). */
extern void netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
/* No-op stubs so callers need no #ifdefs when netpoll is compiled out. */
static inline void netpoll_rx_disable(struct net_device *dev) { return; }
static inline void netpoll_rx_enable(struct net_device *dev) { return; }
#endif
/* Transmit a UDP packet carrying @len bytes of @msg via @np. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log the parsed configuration of @np. */
void netpoll_print_options(struct netpoll *np);
/* Parse a netpoll configuration string @opt into @np; 0 on success. */
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Bind @np to @ndev; __netpoll_setup assumes the caller holds the locks. */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
int netpoll_setup(struct netpoll *np);
/* Query/set the global netpoll trap state (packet interception). */
int netpoll_trap(void);
void netpoll_set_trap(int trap);
/* Teardown; the __ variant assumes caller-held locks, _free_async defers. */
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
/* Core RX dispatch; returns nonzero if netpoll consumed @skb. */
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
  63. static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
  64. {
  65. unsigned long flags;
  66. local_irq_save(flags);
  67. netpoll_send_skb_on_dev(np, skb, np->dev);
  68. local_irq_restore(flags);
  69. }
#ifdef CONFIG_NETPOLL_TRAP
/*
 * True when at least one netpoll instance on this device has registered
 * an rx_skb_hook and so wants to intercept incoming packets.
 */
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return !list_empty(&npinfo->rx_np);
}
#else
/* Without CONFIG_NETPOLL_TRAP, netpoll never intercepts RX packets. */
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return false;
}
#endif
  81. #ifdef CONFIG_NETPOLL
  82. static inline bool netpoll_rx_on(struct sk_buff *skb)
  83. {
  84. struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
  85. return npinfo && netpoll_rx_processing(npinfo);
  86. }
/*
 * Offer @skb to netpoll RX interception.  Returns true if netpoll
 * consumed the skb (the caller must stop processing it), false to let
 * normal stack RX continue.
 *
 * Local interrupts are disabled across the whole sequence, and the
 * rx_processing check is repeated under rx_lock, so the dispatch cannot
 * race with clients unregistering their hooks.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);

	/* cheap lockless pre-check before taking the lock */
	if (!netpoll_rx_on(skb))
		goto out;

	npinfo = rcu_dereference_bh(skb->dev->npinfo);
	spin_lock(&npinfo->rx_lock);
	/* check rx_processing again with the lock held */
	if (netpoll_rx_processing(npinfo) && __netpoll_rx(skb, npinfo))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
  105. static inline int netpoll_receive_skb(struct sk_buff *skb)
  106. {
  107. if (!list_empty(&skb->dev->napi_list))
  108. return netpoll_rx(skb);
  109. return 0;
  110. }
  111. static inline void *netpoll_poll_lock(struct napi_struct *napi)
  112. {
  113. struct net_device *dev = napi->dev;
  114. if (dev && dev->npinfo) {
  115. spin_lock(&napi->poll_lock);
  116. napi->poll_owner = smp_processor_id();
  117. return napi;
  118. }
  119. return NULL;
  120. }
  121. static inline void netpoll_poll_unlock(void *have)
  122. {
  123. struct napi_struct *napi = have;
  124. if (napi) {
  125. napi->poll_owner = -1;
  126. spin_unlock(&napi->poll_lock);
  127. }
  128. }
/*
 * Heuristic used by drivers: netpoll transmits with local interrupts
 * disabled (see netpoll_send_skb), so irqs being off is taken as "a
 * netpoll TX may be in progress on this CPU".
 */
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
#else
/*
 * !CONFIG_NETPOLL: inert stubs with the same signatures as the real
 * inlines above, so callers compile without their own #ifdefs.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return false;
}
#endif /* CONFIG_NETPOLL */

#endif /* _LINUX_NETPOLL_H */