#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01
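
/*
 * The ring buffer descriptor. Note that data_pages[] is a zero-length
 * array: the descriptor and its page-pointer table are one allocation,
 * sized along the lines of (a sketch of the arithmetic, not necessarily
 * rb_alloc()'s exact code):
 *
 *	size = sizeof(struct ring_buffer) + nr_pages * sizeof(void *);
 */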
struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order */
#endif
        int                             nr_pages;       /* nr of data pages */
        int                             overwrite;      /* can overwrite itself */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position */
        local_t                         nest;           /* nested writers */
        local_t                         events;         /* event limit */
        local_t                         wakeup;         /* wakeup stamp */
        local_t                         lost;           /* nr records lost */

        long                            watermark;      /* wakeup watermark */
        long                            aux_watermark;  /* AUX wakeup watermark */

        /* poll support */
        spinlock_t                      event_lock;
        struct list_head                event_list;

        atomic_t                        mmap_count;
        unsigned long                   mmap_locked;
        struct user_struct              *mmap_user;

        /* AUX area */
        local_t                         aux_head;       /* AUX write position */
        local_t                         aux_nest;       /* nested AUX writers */
        local_t                         aux_wakeup;     /* last AUX wakeup position */
        unsigned long                   aux_pgoff;      /* mmap offset of the AUX area */
        int                             aux_nr_pages;   /* nr of AUX pages */
        int                             aux_overwrite;  /* AUX overwrite mode */
        atomic_t                        aux_mmap_count;
        unsigned long                   aux_mmap_locked;
        void                            (*free_aux)(void *);
        atomic_t                        aux_refcount;
        void                            **aux_pages;    /* AUX page pointers */
        void                            *aux_priv;      /* PMU-private AUX data */

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};
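
/*
 * Buffer lifecycle, as the declarations below suggest: rb_alloc() creates
 * a buffer holding one reference, ring_buffer_get()/ring_buffer_put()
 * take and drop references, and the final put frees the buffer via
 * rb_free() (deferred through the rcu_head above). The AUX area has its
 * own aux_refcount, managed by rb_alloc_aux()/rb_free_aux().
 */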
extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}
void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
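
/*
 * Total size of the data area: nr_pages chunks of PAGE_SIZE << page_order()
 * bytes each. For example, with nr_pages == 8, order 0 and 4 KiB pages,
 * perf_data_size() is 8 << 12 == 32 KiB.
 */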
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
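
/*
 * DEFINE_OUTPUT_COPY() generates a copy routine with the signature
 *
 *	unsigned long func_name(struct perf_output_handle *handle,
 *				const void *buf, unsigned long len);
 *
 * memcpy_func is expected to return the number of bytes it did NOT copy
 * (0 on full success, like copy_from_user()). The generated routine
 * copies in page-sized chunks, wraps around the buffer with
 * "handle->page &= rb->nr_pages - 1" -- which assumes nr_pages is a
 * power of two -- and returns how many bytes remain uncopied.
 */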
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len          -= written;				\
		handle->addr += written;				\
		buf          += written;				\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr  = rb->data_pages[handle->page];	\
			handle->size  = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

/* Copy nothing, just advance the handle; used to skip over output. */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
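
/*
 * Fallback for architectures that do not provide their own
 * arch_perf_out_copy_user. It copies from user memory with page faults
 * disabled, so it is safe in atomic output paths; like
 * __copy_from_user_inatomic(), it returns the number of bytes that
 * could not be copied.
 */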
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
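
/*
 * Usage sketch for the generated helpers (hypothetical caller; the real
 * writers live in ring_buffer.c behind perf_output_begin() and
 * perf_output_end()):
 *
 *	left = __output_copy_user(handle, ubuf, len);
 *	// a nonzero 'left' means the user copy faulted and that many
 *	// bytes at the tail of the record were not written
 */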
/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
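
/*
 * Per-context recursion protection. 'recursion' is an array of four
 * counters, one per context level: 0 == task, 1 == softirq,
 * 2 == hardirq, 3 == NMI. Returns the index for the current context,
 * or -1 if an event is already being processed at this level.
 */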
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}
static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
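
/*
 * Typical pattern (a sketch; the real callers pass a per-CPU recursion
 * array):
 *
 *	rctx = get_recursion_context(this_cpu_ptr(recursion));
 *	if (rctx < 0)
 *		return;		// already active in this context
 *	... do the work ...
 *	put_recursion_context(this_cpu_ptr(recursion), rctx);
 */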
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */