inet_frag.h

#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	/* The percpu_counter "mem" needs to be cacheline aligned.
	 *  mem.count must not share a cacheline with other writers
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
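/* Illustrative sketch (not part of this header): each protocol sets its
 * per-namespace limits before calling inet_frags_init_net().  The values
 * below mirror the IPv4 defaults from ipv4_frags_init_net() in
 * net/ipv4/ip_fragment.c; the function name here is hypothetical.
 */
#if 0
static int example_frags_init_net(struct net *net)
{
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;	/* start refusing/evicting above this */
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;	/* eviction target */
	net->ipv4.frags.timeout     = IP_FRAG_TIME;	/* 30 second reassembly timeout */

	inet_frags_init_net(&net->ipv4.frags);
	return 0;
}
#endif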
struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;      /* when will this queue expire? */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;        /* total length of orig datagram */
	int			meat;       /* length of received fragments */
	__u8			flags;      /* first/last segment arrived? */
#define INET_FRAG_EVICTED	8
#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1
	u16			max_size;   /* maximum received fragment size */
	struct netns_frags	*net;
};
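/* Illustrative sketch (not part of this header): "meat" accumulates the
 * bytes of all fragments received so far; the datagram is complete once
 * both INET_FRAG_FIRST_IN and INET_FRAG_LAST_IN are set and meat covers
 * the full original length.  This mirrors the completion test in
 * ip_frag_queue() (net/ipv4/ip_fragment.c); the helper name is
 * hypothetical.
 */
#if 0
static inline bool example_frag_queue_done(const struct inet_frag_queue *q)
{
	const __u8 both_in = INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN;

	return (q->flags & both_in) == both_in && q->meat == q->len;
}
#endif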
#define INETFRAGS_HASHSZ	1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUESIZE(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};
struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];

	struct work_struct	frags_work;
	unsigned int		next_bucket;
	unsigned long		last_rebuild_jiffies;
	bool			rebuild;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 *
	 * rnd_seqlock is used to let hash insertion detect
	 * when it needs to re-lookup the hash chain to use.
	 */
	u32			rnd;
	seqlock_t		rnd_seqlock;
	int			qsize;

	unsigned int		(*hashfn)(const struct inet_frag_queue *);
	bool			(*match)(const struct inet_frag_queue *q,
					 const void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};
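/* Illustrative sketch (not part of this header): a protocol registers its
 * reassembly implementation by filling in the callbacks and calling
 * inet_frags_init().  Modelled on ipfrag_init() for ip4_frags in
 * net/ipv4/ip_fragment.c, which supplies these callbacks for IPv4.
 */
#if 0
static struct inet_frags ip4_frags;

void __init ipfrag_init(void)
{
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	inet_frags_init(&ip4_frags);
}
#endif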
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}
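/* Illustrative sketch (not part of this header): inet_frag_find() returns
 * a queue with an elevated refcount (or an ERR_PTR when a hash chain
 * exceeds INETFRAGS_MAXDEPTH), and each successful lookup must be paired
 * with inet_frag_put().  Modelled on ip_find()/ip_defrag() in
 * net/ipv4/ip_fragment.c; "arg" and "hash" stand in for the protocol's
 * real key and hash value.
 */
#if 0
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	/* ... process the fragment under q->lock ... */
	inet_frag_put(q, &ip4_frags);
#endif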
/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
	unsigned int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
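/* Illustrative sketch (not part of this header): callers charge each
 * queued skb's truesize against the namespace limit and release it again
 * when fragments are freed, as ip_frag_queue() and inet_frag_destroy()
 * do.  frag_mem_limit() is the cheap, possibly stale per-cpu read used
 * when comparing against high_thresh; sum_frag_mem_limit() is the exact
 * but slower sum used for reporting.
 */
#if 0
	add_frag_mem_limit(q, skb->truesize);	/* fragment queued */
	sub_frag_mem_limit(q, skb->truesize);	/* fragment freed */

	if (frag_mem_limit(nf) > nf->high_thresh)
		/* refuse new queues and kick the eviction worker */;
#endif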
/* RFC 3168 support :
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
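/* Illustrative sketch (not part of this header): at reassembly time the
 * OR-accumulated per-fragment ECN bits index ip_frag_ecn_table[] to
 * recover the ECN codepoint for the reassembled packet; a table value of
 * 0xff marks an invalid combination (e.g. Not-ECT mixed with CE) and the
 * datagram is dropped.  Mirrors ip_frag_reasm() in net/ipv4/ip_fragment.c.
 */
#if 0
	u8 ecn = ip_frag_ecn_table[qp->ecn];

	if (unlikely(ecn == 0xff))
		goto out_fail;	/* invalid ECN combination: drop */
#endif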
#endif /* __NET_FRAG_H__ */