/* include/net/inet_frag.h */

#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int timeout;
	int high_thresh;
	int low_thresh;
};
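
/* Example (editorial sketch, not part of the original header): a protocol's
 * per-namespace init fills in the sysctl defaults above and then calls
 * inet_frags_init_net(), modeled on ipv4_frags_init_net() in
 * net/ipv4/ip_fragment.c. The threshold values shown are illustrative
 * assumptions, not mandated by this header:
 *
 *	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
 *	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
 *	net->ipv4.frags.timeout     = IP_FRAG_TIME;
 *	inet_frags_init_net(&net->ipv4.frags);
 */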
struct inet_frag_queue {
	spinlock_t lock;
	struct timer_list timer;	/* when will this queue expire? */
	struct hlist_node list;
	atomic_t refcnt;
	struct sk_buff *fragments;	/* list of received fragments */
	struct sk_buff *fragments_tail;
	ktime_t stamp;
	int len;			/* total length of orig datagram */
	int meat;			/* length of received fragments so far */
	__u8 last_in;			/* first/last segment arrived? */

#define INET_FRAG_EVICTED	8
#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16 max_size;			/* maximum received fragment size */

	struct netns_frags *net;
};
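
/* Example (editorial sketch): last_in accumulates the flags above as
 * fragments arrive, and reassembly typically fires once both endpoints
 * have been seen and "meat" covers the whole datagram, as in IPv4's
 * ip_frag_queue():
 *
 *	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    qp->q.meat == qp->q.len)
 *		return ip_frag_reasm(qp, prev, dev);
 */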
#define INETFRAGS_HASHSZ	1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUESIZE(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128
struct inet_frag_bucket {
	struct hlist_head chain;
	spinlock_t chain_lock;
};
struct inet_frags {
	struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). It is important to keep it on a separate cacheline.
	 * It is primarily a rebuild-protection rwlock.
	 */
	rwlock_t lock ____cacheline_aligned_in_smp;

	struct work_struct frags_work;
	unsigned int next_bucket;
	unsigned long last_rebuild_jiffies;
	bool rebuild;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 */
	u32 rnd;
	int qsize;

	unsigned int (*hashfn)(const struct inet_frag_queue *);
	bool (*match)(const struct inet_frag_queue *q,
		      const void *arg);
	void (*constructor)(struct inet_frag_queue *q,
			    const void *arg);
	void (*destructor)(struct inet_frag_queue *);
	void (*skb_free)(struct sk_buff *);
	void (*frag_expire)(unsigned long data);
};
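
/* Example (editorial sketch): a protocol registers itself by filling in
 * the callbacks and qsize, then calling inet_frags_init(), modeled on
 * ipfrag_init() in net/ipv4/ip_fragment.c:
 *
 *	ip4_frags.hashfn = ip4_hashfn;
 *	ip4_frags.constructor = ip4_frag_init;
 *	ip4_frags.destructor = ip4_frag_free;
 *	ip4_frags.skb_free = NULL;
 *	ip4_frags.qsize = sizeof(struct ipq);
 *	ip4_frags.match = ip4_frag_match;
 *	ip4_frags.frag_expire = ip_expire;
 *	inet_frags_init(&ip4_frags);
 */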
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);
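
/* Example (editorial sketch): the caller takes f->lock for reading before
 * inet_frag_find(), which drops it (note the __releases annotation above);
 * on failure, inet_frag_maybe_warn_overflow() rate-limits a warning when a
 * hash chain exceeds INETFRAGS_MAXDEPTH. Modeled on ip_find() in
 * net/ipv4/ip_fragment.c:
 *
 *	read_lock(&ip4_frags.lock);
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	return container_of(q, struct ipq, q);
 */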
static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}
/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}
static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
	unsigned int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
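
/* Example (editorial sketch): each queued skb's truesize is charged to the
 * per-netns counter and the total is compared against the high_thresh
 * sysctl; variable names follow IPv4's ip_frag_queue()/ip_frag_reasm() but
 * are illustrative here:
 *
 *	add_frag_mem_limit(&qp->q, skb->truesize);	 (on enqueue)
 *	sub_frag_mem_limit(&qp->q, head->truesize);	 (on reassembly/free)
 *	if (frag_mem_limit(nf) > nf->high_thresh)
 *		... eviction work is scheduled / new fragments dropped ...
 */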
/* RFC 3168 support:
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
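
/* Example (editorial sketch): at reassembly time the OR'ed flags index
 * ip_frag_ecn_table[] to pick the ECN codepoint for the reassembled
 * packet, with a sentinel value (0xff) marking invalid combinations, as
 * in IPv4's ip_frag_reasm():
 *
 *	ecn = ip_frag_ecn_table[qp->ecn];
 *	if (unlikely(ecn == 0xff))
 *		goto out_fail;	(invalid fragment ECN combination: drop)
 */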
#endif /* __NET_FRAG_H__ */