@@ -180,13 +180,89 @@ void intel_pmu_lbr_reset(void)
intel_pmu_lbr_reset_64();
}
 
+/*
+ * TOS = most recently recorded branch
+ */
+static inline u64 intel_pmu_lbr_tos(void)
+{
+ u64 tos;
+
+ rdmsrl(x86_pmu.lbr_tos, tos);
+ return tos;
+}
+
+enum {
+ LBR_NONE,
+ LBR_VALID,
+};
+
+static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
+{
+ int i;
+ unsigned lbr_idx, mask;
+ u64 tos;
+
+ if (task_ctx->lbr_callstack_users == 0 ||
+ task_ctx->lbr_stack_state == LBR_NONE) {
+ intel_pmu_lbr_reset();
+ return;
+ }
+
+ mask = x86_pmu.lbr_nr - 1;
+ tos = intel_pmu_lbr_tos();
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ lbr_idx = (tos - i) & mask;
+ wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+ wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+ }
+ task_ctx->lbr_stack_state = LBR_NONE;
+}
+
+static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
+{
+ int i;
+ unsigned lbr_idx, mask;
+ u64 tos;
+
+ if (task_ctx->lbr_callstack_users == 0) {
+ task_ctx->lbr_stack_state = LBR_NONE;
+ return;
+ }
+
+ mask = x86_pmu.lbr_nr - 1;
+ tos = intel_pmu_lbr_tos();
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ lbr_idx = (tos - i) & mask;
+ rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+ rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+ }
+ task_ctx->lbr_stack_state = LBR_VALID;
+}
+
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct x86_perf_task_context *task_ctx;
 
if (!x86_pmu.lbr_nr)
return;
 
+ /*
+ * If LBR callstack feature is enabled and the stack was saved when
+ * the task was scheduled out, restore the stack. Otherwise flush
+ * the LBR stack.
+ */
+ task_ctx = ctx ? ctx->task_ctx_data : NULL;
+ if (task_ctx) {
+ if (sched_in) {
+ __intel_pmu_lbr_restore(task_ctx);
+ cpuc->lbr_context = ctx;
+ } else {
+ __intel_pmu_lbr_save(task_ctx);
+ }
+ return;
+ }
+
/*
	 * When sampling the branch stack in system-wide, it may be
* necessary to flush the stack on context switch. This happens
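
Both new helpers above walk the LBR ring with the same (tos - i) & mask indexing, starting at the most recently recorded branch and stepping backwards. Here is a minimal userspace sketch of that walk, not kernel code: the 8-entry ring size and the starting tos value are invented, and x86_pmu.lbr_nr is assumed to be a power of two, which is what lets the mask absorb the wrap-around when tos - i underflows.

/*
 * Illustrative only: simulate the (tos - i) & mask walk over an LBR-style
 * ring buffer.  NR_LBR stands in for x86_pmu.lbr_nr and tos for the value
 * read by intel_pmu_lbr_tos().
 */
#include <stdio.h>

#define NR_LBR 8

int main(void)
{
	unsigned int tos = 2;			/* pretend TOS points at slot 2 */
	unsigned int mask = NR_LBR - 1;
	unsigned int i;

	/* i = 0 is the newest branch, i = 1 the one before it, and so on */
	for (i = 0; i < NR_LBR; i++)
		printf("logical entry %u -> hardware slot %u\n", i, (tos - i) & mask);

	return 0;
}

Running it prints slots 2, 1, 0, 7, 6, 5, 4, 3, i.e. the entries from newest to oldest with a clean wrap at the bottom of the ring.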
@@ -279,18 +355,6 @@ void intel_pmu_lbr_disable_all(void)
__intel_pmu_lbr_disable();
}
 
-/*
- * TOS = most recently recorded branch
- */
-static inline u64 intel_pmu_lbr_tos(void)
-{
- u64 tos;
-
- rdmsrl(x86_pmu.lbr_tos, tos);
-
- return tos;
-}
-
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
unsigned long mask = x86_pmu.lbr_nr - 1;
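
The second hunk simply deletes the old definition of intel_pmu_lbr_tos(), which the first hunk re-added above its new callers. The part worth dwelling on is the handshake between __intel_pmu_lbr_save() and __intel_pmu_lbr_restore(): a snapshot taken at sched-out is marked LBR_VALID, and restoring it drops the state back to LBR_NONE, so a later sched-in that was not preceded by a fresh save falls back to intel_pmu_lbr_reset() rather than replaying stale entries. The following is a simplified userspace model of that state machine, not the kernel code: the array standing in for the MSRs, the slimmed-down context structure, and the sample values are all invented for illustration.

#include <stdio.h>
#include <string.h>

#define NR_LBR 4

static unsigned long hw_lbr[NR_LBR];		/* stands in for the LBR from/to MSRs */

enum { LBR_NONE, LBR_VALID };

struct task_ctx {
	int users;				/* models lbr_callstack_users */
	int state;				/* models lbr_stack_state */
	unsigned long saved[NR_LBR];
};

static void lbr_save(struct task_ctx *ctx)	/* models __intel_pmu_lbr_save() */
{
	if (!ctx->users) {
		ctx->state = LBR_NONE;
		return;
	}
	memcpy(ctx->saved, hw_lbr, sizeof(hw_lbr));
	ctx->state = LBR_VALID;
}

static void lbr_restore(struct task_ctx *ctx)	/* models __intel_pmu_lbr_restore() */
{
	if (!ctx->users || ctx->state == LBR_NONE) {
		memset(hw_lbr, 0, sizeof(hw_lbr));	/* the reset path */
		return;
	}
	memcpy(hw_lbr, ctx->saved, sizeof(hw_lbr));
	ctx->state = LBR_NONE;			/* the snapshot is consumed once */
}

int main(void)
{
	struct task_ctx ctx = { .users = 1, .state = LBR_NONE };

	hw_lbr[0] = 0x1234;			/* the task records a branch */
	lbr_save(&ctx);				/* sched-out: snapshot taken, LBR_VALID */
	hw_lbr[0] = 0xdead;			/* another task clobbers the hardware */
	lbr_restore(&ctx);			/* sched-in: snapshot written back */
	printf("entry 0 after restore: %#lx, state: %s\n", hw_lbr[0],
	       ctx.state == LBR_VALID ? "LBR_VALID" : "LBR_NONE");
	return 0;
}

It prints entry 0 as 0x1234 with the state back at LBR_NONE, mirroring how the real restore path both rewinds the hardware and invalidates the saved copy.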