cpu_buffer.h
/**
 * @file cpu_buffer.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#ifndef OPROFILE_CPU_BUFFER_H
#define OPROFILE_CPU_BUFFER_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>
#include <linux/sched.h>

struct task_struct;

int alloc_cpu_buffers(void);
void free_cpu_buffers(void);

void start_cpu_work(void);
void end_cpu_work(void);
/*
 * The CPU buffer is composed of such entries (which are
 * also used for context switch notes).
 */
struct op_sample {
	unsigned long eip;	/* sampled PC (name suggests instruction pointer) */
	unsigned long event;	/* event value; also carries transient codes
				 * such as CPU_IS_KERNEL / CPU_TRACE_BEGIN
				 * defined at the end of this header */
};
/*
 * Per-CPU sample ring buffer.  One writer (the sampling path, via
 * cpu_buffer_write_entry/commit) advances head_pos; one reader (the
 * event-buffer sync path, via cpu_buffer_read_entry) advances tail_pos.
 */
struct oprofile_cpu_buffer {
	volatile unsigned long head_pos;	/* next slot the writer fills */
	volatile unsigned long tail_pos;	/* next slot the reader drains */
	unsigned long buffer_size;		/* number of op_sample slots; head/tail wrap at this bound */
	struct task_struct *last_task;		/* persistent state for task-switch notes (reset by cpu_buffer_reset) */
	int last_is_kernel;			/* persistent state for kernel/user-switch notes */
	int tracing;				/* nonzero while a backtrace is being recorded -- TODO confirm against cpu_buffer.c */
	struct op_sample *buffer;		/* the ring storage, buffer_size entries */
	unsigned long sample_received;		/* statistics counters -- presumably exported to userspace; verify against oprofile_stats */
	unsigned long sample_lost_overflow;
	unsigned long backtrace_aborted;
	unsigned long sample_invalid_eip;
	int cpu;				/* owning CPU number */
	struct delayed_work work;		/* periodic flush work (see start_cpu_work/end_cpu_work) */
};
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
  45. static inline
  46. struct op_sample *cpu_buffer_write_entry(struct oprofile_cpu_buffer *cpu_buf)
  47. {
  48. return &cpu_buf->buffer[cpu_buf->head_pos];
  49. }
  50. static inline
  51. void cpu_buffer_write_commit(struct oprofile_cpu_buffer *b)
  52. {
  53. unsigned long new_head = b->head_pos + 1;
  54. /*
  55. * Ensure anything written to the slot before we increment is
  56. * visible
  57. */
  58. wmb();
  59. if (new_head < b->buffer_size)
  60. b->head_pos = new_head;
  61. else
  62. b->head_pos = 0;
  63. }
  64. static inline
  65. struct op_sample *cpu_buffer_read_entry(struct oprofile_cpu_buffer *cpu_buf)
  66. {
  67. return &cpu_buf->buffer[cpu_buf->tail_pos];
  68. }
  69. /* "acquire" as many cpu buffer slots as we can */
  70. static inline
  71. unsigned long cpu_buffer_entries(struct oprofile_cpu_buffer *b)
  72. {
  73. unsigned long head = b->head_pos;
  74. unsigned long tail = b->tail_pos;
  75. /*
  76. * Subtle. This resets the persistent last_task
  77. * and in_kernel values used for switching notes.
  78. * BUT, there is a small window between reading
  79. * head_pos, and this call, that means samples
  80. * can appear at the new head position, but not
  81. * be prefixed with the notes for switching
  82. * kernel mode or a task switch. This small hole
  83. * can lead to mis-attribution or samples where
  84. * we don't know if it's in the kernel or not,
  85. * at the start of an event buffer.
  86. */
  87. cpu_buffer_reset(b);
  88. if (head >= tail)
  89. return head - tail;
  90. return head + (b->buffer_size - tail);
  91. }
/* transient events for the CPU buffer -> event buffer */
#define CPU_IS_KERNEL 1
#define CPU_TRACE_BEGIN 2
#define IBS_FETCH_BEGIN 3
#define IBS_OP_BEGIN 4

#endif /* OPROFILE_CPU_BUFFER_H */