@@ -951,6 +951,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 	if (!cqm_group_leader(event))
 		return 0;
 
+	/*
+	 * Getting up-to-date values requires an SMP IPI which is not
+	 * possible if we're being called in interrupt context. Return
+	 * the cached values instead.
+	 */
+	if (unlikely(in_interrupt()))
+		goto out;
+
 	/*
 	 * Notice that we don't perform the reading of an RMID
 	 * atomically, because we can't hold a spin lock across the