/*
 * mm/percpu-internal.h - internal definitions shared between the percpu
 * allocator core and its statistics code.
 */
  1. #ifndef _MM_PERCPU_INTERNAL_H
  2. #define _MM_PERCPU_INTERNAL_H
  3. #include <linux/types.h>
  4. #include <linux/percpu.h>
/*
 * pcpu_chunk - bookkeeping for one chunk of percpu memory and its
 * allocation map.
 *
 * NOTE(review): fields appear to be protected by pcpu_lock — the stats
 * helpers below assert it held when touching nr_alloc/max_alloc_size;
 * confirm for the remaining fields against percpu.c.
 */
struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size;	/* largest allocation size */
#endif
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	bool			has_reserved;	/* Indicates if chunk has reserved space
						   at the beginning. Reserved chunk will
						   contain reservation for static chunk.
						   Dynamic chunk will contain reservation
						   for static and reserved chunks. */
	int			nr_populated;	/* # of populated pages */
	unsigned long		populated[];	/* populated bitmap (flexible array) */
};
/* Serializes allocator state; also guards the stats counters below. */
extern spinlock_t pcpu_lock;

/* presumably pcpu_slot is an array of chunk lists keyed by free size — verify in percpu.c */
extern struct list_head *pcpu_slot;		/* chunk list slots */
extern int pcpu_nr_slots;			/* number of entries in pcpu_slot */
extern struct pcpu_chunk *pcpu_first_chunk;	/* chunk serving the first (dynamic) area */
extern struct pcpu_chunk *pcpu_reserved_chunk;	/* chunk serving the reserved area, if any */
  34. #ifdef CONFIG_PERCPU_STATS
  35. #include <linux/spinlock.h>
/*
 * percpu_stats - global allocation counters; updated under pcpu_lock
 * by the helpers below.
 */
struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};
/* Single global stats instance and the saved boot-time alloc info (defined elsewhere). */
extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;
  48. /*
  49. * For debug purposes. We don't care about the flexible array.
  50. */
  51. static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
  52. {
  53. memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));
  54. /* initialize min_alloc_size to unit_size */
  55. pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
  56. }
  57. /*
  58. * pcpu_stats_area_alloc - increment area allocation stats
  59. * @chunk: the location of the area being allocated
  60. * @size: size of area to allocate in bytes
  61. *
  62. * CONTEXT:
  63. * pcpu_lock.
  64. */
  65. static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
  66. {
  67. lockdep_assert_held(&pcpu_lock);
  68. pcpu_stats.nr_alloc++;
  69. pcpu_stats.nr_cur_alloc++;
  70. pcpu_stats.nr_max_alloc =
  71. max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
  72. pcpu_stats.min_alloc_size =
  73. min(pcpu_stats.min_alloc_size, size);
  74. pcpu_stats.max_alloc_size =
  75. max(pcpu_stats.max_alloc_size, size);
  76. chunk->nr_alloc++;
  77. chunk->max_alloc_size = max(chunk->max_alloc_size, size);
  78. }
  79. /*
  80. * pcpu_stats_area_dealloc - decrement allocation stats
  81. * @chunk: the location of the area being deallocated
  82. *
  83. * CONTEXT:
  84. * pcpu_lock.
  85. */
  86. static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
  87. {
  88. lockdep_assert_held(&pcpu_lock);
  89. pcpu_stats.nr_dealloc++;
  90. pcpu_stats.nr_cur_alloc--;
  91. chunk->nr_alloc--;
  92. }
  93. /*
  94. * pcpu_stats_chunk_alloc - increment chunk stats
  95. */
  96. static inline void pcpu_stats_chunk_alloc(void)
  97. {
  98. unsigned long flags;
  99. spin_lock_irqsave(&pcpu_lock, flags);
  100. pcpu_stats.nr_chunks++;
  101. pcpu_stats.nr_max_chunks =
  102. max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
  103. spin_unlock_irqrestore(&pcpu_lock, flags);
  104. }
/*
 * pcpu_stats_chunk_dealloc - account destruction of a chunk
 *
 * Takes pcpu_lock itself (irq-safe), matching pcpu_stats_chunk_alloc().
 * nr_max_chunks is intentionally left as a high-water mark.
 */
static inline void pcpu_stats_chunk_dealloc(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_stats.nr_chunks--;
	spin_unlock_irqrestore(&pcpu_lock, flags);
}
  115. #else
/*
 * !CONFIG_PERCPU_STATS: all stats hooks compile to no-ops so callers
 * in percpu.c need no #ifdefs of their own.
 */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}
  131. #endif /* !CONFIG_PERCPU_STATS */
  132. #endif