@@ -580,35 +580,40 @@ extern u64 perf_event_read_value(struct perf_event *event,
 				     u64 *enabled, u64 *running);
 
 struct perf_sample_data {
-	u64				type;
+	/*
+	 * Fields set by perf_sample_data_init(), group so as to
+	 * minimize the cachelines touched.
+	 */
+	u64				addr;
+	struct perf_raw_record		*raw;
+	struct perf_branch_stack	*br_stack;
+	u64				period;
+	u64				weight;
+	u64				txn;
+	union  perf_mem_data_src	data_src;
 
+	/*
+	 * The other fields, optionally {set,used} by
+	 * perf_{prepare,output}_sample().
+	 */
+	u64				type;
 	u64				ip;
 	struct {
 		u32	pid;
 		u32	tid;
 	}				tid_entry;
 	u64				time;
-	u64				addr;
 	u64				id;
 	u64				stream_id;
 	struct {
 		u32	cpu;
 		u32	reserved;
 	}				cpu_entry;
-	u64				period;
-	union  perf_mem_data_src	data_src;
 	struct perf_callchain_entry	*callchain;
-	struct perf_raw_record		*raw;
-	struct perf_branch_stack	*br_stack;
 	struct perf_regs		regs_user;
 	struct perf_regs		regs_intr;
 	u64				stack_user_size;
-	u64				weight;
-	/*
-	 * Transaction flags for abort events:
-	 */
-	u64				txn;
-};
+} ____cacheline_aligned;
 
 /* default value for data source */
 #define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
@@ -625,14 +630,9 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
 	data->raw = NULL;
 	data->br_stack = NULL;
 	data->period = period;
-	data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
-	data->regs_user.regs = NULL;
-	data->stack_user_size = 0;
 	data->weight = 0;
 	data->data_src.val = PERF_MEM_NA;
 	data->txn = 0;
-	data->regs_intr.abi = PERF_SAMPLE_REGS_ABI_NONE;
-	data->regs_intr.regs = NULL;
 }
 
 extern void perf_output_sample(struct perf_output_handle *handle,