internal.h 5.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213
#ifndef MPLS_INTERNAL_H
#define MPLS_INTERNAL_H
#include <net/mpls.h>

/* Put a reasonable limit on the number of labels
 * we will accept from userspace.
 */
#define MAX_NEW_LABELS 30
/* Host-order, unpacked form of one MPLS label stack entry
 * (see mpls_entry_decode() below).
 */
struct mpls_entry_decoded {
	u32 label;	/* 20-bit label value */
	u8 ttl;		/* time to live */
	u8 tc;		/* traffic class */
	u8 bos;		/* bottom-of-stack flag */
};
/* Per-CPU MPLS link statistics; syncp lets readers obtain a consistent
 * snapshot of the 64-bit counters on 32-bit hosts (see MPLS_INC_STATS*).
 */
struct mpls_pcpu_stats {
	struct mpls_link_stats stats;
	struct u64_stats_sync syncp;
};
/* Per-netdevice MPLS state, reached via dev->mpls_ptr (see mpls_dev_get()). */
struct mpls_dev {
	int input_enabled;		/* NOTE(review): presumably toggled by the
					 * per-device sysctl below - confirm */
	struct net_device *dev;		/* back-pointer to the owning device */
	struct mpls_pcpu_stats __percpu *stats;
	struct ctl_table_header *sysctl;
	struct rcu_head rcu;		/* for deferred free after grace period */
};
#if BITS_PER_LONG == 32

/* On 32-bit hosts a 64-bit counter update is not a single store, so the
 * increments are done inside a u64_stats_update section with bottom
 * halves disabled to keep readers and nested updaters consistent.
 */
#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \
	do { \
		__typeof__(*(mdev)->stats) *ptr = \
			raw_cpu_ptr((mdev)->stats); \
		local_bh_disable(); \
		u64_stats_update_begin(&ptr->syncp); \
		ptr->stats.pkts_field++; \
		ptr->stats.bytes_field += (len); \
		u64_stats_update_end(&ptr->syncp); \
		local_bh_enable(); \
	} while (0)

#define MPLS_INC_STATS(mdev, field) \
	do { \
		__typeof__(*(mdev)->stats) *ptr = \
			raw_cpu_ptr((mdev)->stats); \
		local_bh_disable(); \
		u64_stats_update_begin(&ptr->syncp); \
		ptr->stats.field++; \
		u64_stats_update_end(&ptr->syncp); \
		local_bh_enable(); \
	} while (0)

#else

/* 64-bit hosts can bump the counters with plain per-cpu operations. */
#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \
	do { \
		this_cpu_inc((mdev)->stats->stats.pkts_field); \
		this_cpu_add((mdev)->stats->stats.bytes_field, (len)); \
	} while (0)

#define MPLS_INC_STATS(mdev, field) \
	this_cpu_inc((mdev)->stats->stats.field)

#endif
struct sk_buff;

/* Sentinel label value: (1 << 20) is one past the largest 20-bit label. */
#define LABEL_NOT_SPECIFIED (1 << 20)

/* This maximum ha length copied from the definition of struct neighbour */
#define VIA_ALEN_ALIGN sizeof(unsigned long)
#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, VIA_ALEN_ALIGN))
/* Type of the payload carried below the bottom of the label stack. */
enum mpls_payload_type {
	MPT_UNSPEC, /* IPv4 or IPv6 */
	MPT_IPV4 = 4,
	MPT_IPV6 = 6,

	/* Other types not implemented:
	 * - Pseudo-wire with or without control word (RFC4385)
	 * - GAL (RFC5586)
	 */
};
  70. struct mpls_nh { /* next hop label forwarding entry */
  71. struct net_device __rcu *nh_dev;
  72. /* nh_flags is accessed under RCU in the packet path; it is
  73. * modified handling netdev events with rtnl lock held
  74. */
  75. unsigned int nh_flags;
  76. u8 nh_labels;
  77. u8 nh_via_alen;
  78. u8 nh_via_table;
  79. u8 nh_reserved1;
  80. u32 nh_label[0];
  81. };
/* offset of via from beginning of mpls_nh */
#define MPLS_NH_VIA_OFF(num_labels) \
	ALIGN(sizeof(struct mpls_nh) + (num_labels) * sizeof(u32), \
	      VIA_ALEN_ALIGN)

/* all nexthops within a route have the same size based on the
 * max number of labels and max via length across all nexthops
 */
#define MPLS_NH_SIZE(num_labels, max_via_alen) \
	(MPLS_NH_VIA_OFF((num_labels)) + \
	 ALIGN((max_via_alen), VIA_ALEN_ALIGN))
/* Whether the TTL is propagated between the IP header and the MPLS
 * header. NOTE(review): DEFAULT presumably defers to a wider-scope
 * setting - confirm against the users of rt_ttl_propagate.
 */
enum mpls_ttl_propagation {
	MPLS_TTL_PROP_DEFAULT,
	MPLS_TTL_PROP_ENABLED,
	MPLS_TTL_PROP_DISABLED,
};
/* The route, nexthops and vias are stored together in the same memory
 * block:
 *
 * +----------------------+
 * | mpls_route           |
 * +----------------------+
 * | mpls_nh 0            |
 * +----------------------+
 * | alignment padding    |   4 bytes for odd number of labels
 * +----------------------+
 * | via[rt_max_alen] 0   |
 * +----------------------+
 * | alignment padding    |   via's aligned on sizeof(unsigned long)
 * +----------------------+
 * | ...                  |
 * +----------------------+
 * | mpls_nh n-1          |
 * +----------------------+
 * | via[rt_max_alen] n-1 |
 * +----------------------+
 */
struct mpls_route { /* next hop label forwarding entry */
	struct rcu_head rt_rcu;
	u8 rt_protocol;
	u8 rt_payload_type;	/* enum mpls_payload_type */
	u8 rt_max_alen;		/* max via length across all nexthops */
	u8 rt_ttl_propagate;	/* enum mpls_ttl_propagation */
	u8 rt_nhn;		/* number of nexthops in rt_nh[] */

	/* rt_nhn_alive is accessed under RCU in the packet path; it
	 * is modified handling netdev events with rtnl lock held
	 */
	u8 rt_nhn_alive;
	u8 rt_nh_size;		/* stride between nexthops; see for_nexthops() */
	u8 rt_via_offset;	/* NOTE(review): presumably the via offset within
				 * each nexthop, cf. MPLS_NH_VIA_OFF - confirm */
	u8 rt_reserved1;

	/* GNU zero-length array; cannot be a C99 flexible array member
	 * because struct mpls_nh itself ends in one.
	 */
	struct mpls_nh rt_nh[0];
};
  134. #define for_nexthops(rt) { \
  135. int nhsel; struct mpls_nh *nh; u8 *__nh; \
  136. for (nhsel = 0, nh = (rt)->rt_nh, __nh = (u8 *)((rt)->rt_nh); \
  137. nhsel < (rt)->rt_nhn; \
  138. __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)
  139. #define change_nexthops(rt) { \
  140. int nhsel; struct mpls_nh *nh; u8 *__nh; \
  141. for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh), \
  142. __nh = (u8 *)((rt)->rt_nh); \
  143. nhsel < (rt)->rt_nhn; \
  144. __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)
  145. #define endfor_nexthops(rt) }
  146. static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, unsigned ttl, unsigned tc, bool bos)
  147. {
  148. struct mpls_shim_hdr result;
  149. result.label_stack_entry =
  150. cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
  151. (tc << MPLS_LS_TC_SHIFT) |
  152. (bos ? (1 << MPLS_LS_S_SHIFT) : 0) |
  153. (ttl << MPLS_LS_TTL_SHIFT));
  154. return result;
  155. }
  156. static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *hdr)
  157. {
  158. struct mpls_entry_decoded result;
  159. unsigned entry = be32_to_cpu(hdr->label_stack_entry);
  160. result.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
  161. result.ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
  162. result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
  163. result.bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;
  164. return result;
  165. }
  166. static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
  167. {
  168. return rcu_dereference_rtnl(dev->mpls_ptr);
  169. }
/* Netlink helpers for encoding/decoding a label array attribute. */
int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels,
		   const u32 label[]);
int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
		   u32 label[], struct netlink_ext_ack *extack);

/* Device/output-path helpers implemented elsewhere in this module. */
bool mpls_output_possible(const struct net_device *dev);
unsigned int mpls_dev_mtu(const struct net_device *dev);
bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
void mpls_stats_inc_outucastpkts(struct net_device *dev,
				 const struct sk_buff *skb);

#endif /* MPLS_INTERNAL_H */
  179. #endif /* MPLS_INTERNAL_H */