/*
 * mm/percpu-internal.h - internal definitions shared by the percpu allocator.
 */
  1. #ifndef _MM_PERCPU_INTERNAL_H
  2. #define _MM_PERCPU_INTERNAL_H
  3. #include <linux/types.h>
  4. #include <linux/percpu.h>
  5. /*
  6. * pcpu_block_md is the metadata block struct.
  7. * Each chunk's bitmap is split into a number of full blocks.
  8. * All units are in terms of bits.
  9. */
  10. struct pcpu_block_md {
  11. int contig_hint; /* contig hint for block */
  12. int contig_hint_start; /* block relative starting
  13. position of the contig hint */
  14. int left_free; /* size of free space along
  15. the left side of the block */
  16. int right_free; /* size of free space along
  17. the right side of the block */
  18. int first_free; /* block position of first free */
  19. };
  20. struct pcpu_chunk {
  21. #ifdef CONFIG_PERCPU_STATS
  22. int nr_alloc; /* # of allocations */
  23. size_t max_alloc_size; /* largest allocation size */
  24. #endif
  25. struct list_head list; /* linked to pcpu_slot lists */
  26. int free_bytes; /* free bytes in the chunk */
  27. int contig_bits; /* max contiguous size hint */
  28. int contig_bits_start; /* contig_bits starting
  29. offset */
  30. void *base_addr; /* base address of this chunk */
  31. unsigned long *alloc_map; /* allocation map */
  32. unsigned long *bound_map; /* boundary map */
  33. struct pcpu_block_md *md_blocks; /* metadata blocks */
  34. void *data; /* chunk data */
  35. int first_bit; /* no free below this */
  36. bool immutable; /* no [de]population allowed */
  37. int start_offset; /* the overlap with the previous
  38. region to have a page aligned
  39. base_addr */
  40. int end_offset; /* additional area required to
  41. have the region end page
  42. aligned */
  43. int nr_pages; /* # of pages served by this chunk */
  44. int nr_populated; /* # of populated pages */
  45. int nr_empty_pop_pages; /* # of empty populated pages */
  46. unsigned long populated[]; /* populated bitmap */
  47. };
/* Global allocator state, defined in mm/percpu.c. */
extern spinlock_t pcpu_lock;		/* protects chunk and stat state;
					   see lockdep asserts below */
extern struct list_head *pcpu_slot;	/* chunk list slots */
extern int pcpu_nr_slots;		/* number of chunk list slots */
extern int pcpu_nr_empty_pop_pages;	/* # of empty populated pages */
extern struct pcpu_chunk *pcpu_first_chunk;	/* first chunk */
extern struct pcpu_chunk *pcpu_reserved_chunk;	/* reserved chunk, may be NULL
						   — NOTE(review): optionality
						   inferred, confirm in percpu.c */
/**
 * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bitmap blocks used.
 */
static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
{
	/* bytes served / bytes per bitmap block; assumes the division is
	   exact (PAGE_SIZE a multiple of PCPU_BITMAP_BLOCK_SIZE bytes per
	   block) — TODO confirm against the PCPU_BITMAP_BLOCK_* defines */
	return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
}
/**
 * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
 * @pages: number of physical pages
 *
 * This conversion is from physical pages to the number of bits
 * required in the bitmap.  One bitmap bit represents
 * PCPU_MIN_ALLOC_SIZE bytes of the chunk.
 */
static inline int pcpu_nr_pages_to_map_bits(int pages)
{
	return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
}
/**
 * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bits in the bitmap.  Thin wrapper around
 * pcpu_nr_pages_to_map_bits() using the chunk's own page count.
 */
static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
{
	return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
}
  87. #ifdef CONFIG_PERCPU_STATS
  88. #include <linux/spinlock.h>
/* Lifetime allocator statistics; updated under pcpu_lock (see helpers below). */
struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};
extern struct percpu_stats pcpu_stats;		/* global allocator statistics */
extern struct pcpu_alloc_info pcpu_stats_ai;	/* alloc info saved by
						   pcpu_stats_save_ai() */
/*
 * pcpu_stats_save_ai - save a copy of the allocation info
 * @ai: allocation info to record
 *
 * For debug purposes.  We don't care about the flexible array, so only
 * the fixed-size head of the struct is copied.
 */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
	memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));

	/* initialize min_alloc_size to unit_size so the first real
	   allocation can only shrink it via min() */
	pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
}
  110. /*
  111. * pcpu_stats_area_alloc - increment area allocation stats
  112. * @chunk: the location of the area being allocated
  113. * @size: size of area to allocate in bytes
  114. *
  115. * CONTEXT:
  116. * pcpu_lock.
  117. */
  118. static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
  119. {
  120. lockdep_assert_held(&pcpu_lock);
  121. pcpu_stats.nr_alloc++;
  122. pcpu_stats.nr_cur_alloc++;
  123. pcpu_stats.nr_max_alloc =
  124. max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
  125. pcpu_stats.min_alloc_size =
  126. min(pcpu_stats.min_alloc_size, size);
  127. pcpu_stats.max_alloc_size =
  128. max(pcpu_stats.max_alloc_size, size);
  129. chunk->nr_alloc++;
  130. chunk->max_alloc_size = max(chunk->max_alloc_size, size);
  131. }
  132. /*
  133. * pcpu_stats_area_dealloc - decrement allocation stats
  134. * @chunk: the location of the area being deallocated
  135. *
  136. * CONTEXT:
  137. * pcpu_lock.
  138. */
  139. static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
  140. {
  141. lockdep_assert_held(&pcpu_lock);
  142. pcpu_stats.nr_dealloc++;
  143. pcpu_stats.nr_cur_alloc--;
  144. chunk->nr_alloc--;
  145. }
  146. /*
  147. * pcpu_stats_chunk_alloc - increment chunk stats
  148. */
  149. static inline void pcpu_stats_chunk_alloc(void)
  150. {
  151. unsigned long flags;
  152. spin_lock_irqsave(&pcpu_lock, flags);
  153. pcpu_stats.nr_chunks++;
  154. pcpu_stats.nr_max_chunks =
  155. max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
  156. spin_unlock_irqrestore(&pcpu_lock, flags);
  157. }
  158. /*
  159. * pcpu_stats_chunk_dealloc - decrement chunk stats
  160. */
  161. static inline void pcpu_stats_chunk_dealloc(void)
  162. {
  163. unsigned long flags;
  164. spin_lock_irqsave(&pcpu_lock, flags);
  165. pcpu_stats.nr_chunks--;
  166. spin_unlock_irqrestore(&pcpu_lock, flags);
  167. }
  168. #else
/* !CONFIG_PERCPU_STATS: the stats hooks compile away to no-ops. */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}
  184. #endif /* !CONFIG_PERCPU_STATS */
  185. #endif