/* inet_frag.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __NET_FRAG_H__
  3. #define __NET_FRAG_H__
  4. struct netns_frags {
  5. /* Keep atomic mem on separate cachelines in structs that include it */
  6. atomic_t mem ____cacheline_aligned_in_smp;
  7. /* sysctls */
  8. int timeout;
  9. int high_thresh;
  10. int low_thresh;
  11. int max_dist;
  12. };
  13. /**
  14. * fragment queue flags
  15. *
  16. * @INET_FRAG_FIRST_IN: first fragment has arrived
  17. * @INET_FRAG_LAST_IN: final fragment has arrived
  18. * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
  19. */
  20. enum {
  21. INET_FRAG_FIRST_IN = BIT(0),
  22. INET_FRAG_LAST_IN = BIT(1),
  23. INET_FRAG_COMPLETE = BIT(2),
  24. };
  25. /**
  26. * struct inet_frag_queue - fragment queue
  27. *
  28. * @lock: spinlock protecting the queue
  29. * @timer: queue expiration timer
  30. * @list: hash bucket list
  31. * @refcnt: reference count of the queue
  32. * @fragments: received fragments head
  33. * @fragments_tail: received fragments tail
  34. * @stamp: timestamp of the last received fragment
  35. * @len: total length of the original datagram
  36. * @meat: length of received fragments so far
  37. * @flags: fragment queue flags
  38. * @max_size: maximum received fragment size
  39. * @net: namespace that this frag belongs to
  40. * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
  41. */
  42. struct inet_frag_queue {
  43. spinlock_t lock;
  44. struct timer_list timer;
  45. struct hlist_node list;
  46. refcount_t refcnt;
  47. struct sk_buff *fragments;
  48. struct sk_buff *fragments_tail;
  49. ktime_t stamp;
  50. int len;
  51. int meat;
  52. __u8 flags;
  53. u16 max_size;
  54. struct netns_frags *net;
  55. struct hlist_node list_evictor;
  56. };
  57. #define INETFRAGS_HASHSZ 1024
  58. /* averaged:
  59. * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
  60. * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
  61. * struct frag_queue))
  62. */
  63. #define INETFRAGS_MAXDEPTH 128
  64. struct inet_frag_bucket {
  65. struct hlist_head chain;
  66. spinlock_t chain_lock;
  67. };
  68. struct inet_frags {
  69. struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
  70. struct work_struct frags_work;
  71. unsigned int next_bucket;
  72. unsigned long last_rebuild_jiffies;
  73. bool rebuild;
  74. /* The first call to hashfn is responsible to initialize
  75. * rnd. This is best done with net_get_random_once.
  76. *
  77. * rnd_seqlock is used to let hash insertion detect
  78. * when it needs to re-lookup the hash chain to use.
  79. */
  80. u32 rnd;
  81. seqlock_t rnd_seqlock;
  82. unsigned int qsize;
  83. unsigned int (*hashfn)(const struct inet_frag_queue *);
  84. bool (*match)(const struct inet_frag_queue *q,
  85. const void *arg);
  86. void (*constructor)(struct inet_frag_queue *q,
  87. const void *arg);
  88. void (*destructor)(struct inet_frag_queue *);
  89. void (*frag_expire)(struct timer_list *t);
  90. struct kmem_cache *frags_cachep;
  91. const char *frags_cache_name;
  92. };
  93. int inet_frags_init(struct inet_frags *);
  94. void inet_frags_fini(struct inet_frags *);
  95. static inline void inet_frags_init_net(struct netns_frags *nf)
  96. {
  97. atomic_set(&nf->mem, 0);
  98. }
  99. void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
  100. void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
  101. void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
  102. struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
  103. struct inet_frags *f, void *key, unsigned int hash);
  104. void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
  105. const char *prefix);
  106. static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
  107. {
  108. if (refcount_dec_and_test(&q->refcnt))
  109. inet_frag_destroy(q, f);
  110. }
  111. static inline bool inet_frag_evicting(struct inet_frag_queue *q)
  112. {
  113. return !hlist_unhashed(&q->list_evictor);
  114. }
  115. /* Memory Tracking Functions. */
  116. static inline int frag_mem_limit(struct netns_frags *nf)
  117. {
  118. return atomic_read(&nf->mem);
  119. }
  120. static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
  121. {
  122. atomic_sub(i, &nf->mem);
  123. }
  124. static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
  125. {
  126. atomic_add(i, &nf->mem);
  127. }
  128. static inline int sum_frag_mem_limit(struct netns_frags *nf)
  129. {
  130. return atomic_read(&nf->mem);
  131. }
  132. /* RFC 3168 support :
  133. * We want to check ECN values of all fragments, do detect invalid combinations.
  134. * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
  135. */
  136. #define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */
  137. #define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */
  138. #define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */
  139. #define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */
  140. extern const u8 ip_frag_ecn_table[16];
  141. #endif