@@ -1013,7 +1013,7 @@ static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
 
 static void sparc_pmu_enable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int i;
 
 	if (cpuc->enabled)
@@ -1031,7 +1031,7 @@ static void sparc_pmu_enable(struct pmu *pmu)
 
 static void sparc_pmu_disable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int i;
 
 	if (!cpuc->enabled)
@@ -1065,7 +1065,7 @@ static int active_event_index(struct cpu_hw_events *cpuc,
 
 static void sparc_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx = active_event_index(cpuc, event);
 
 	if (flags & PERF_EF_RELOAD) {
@@ -1080,7 +1080,7 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
 
 static void sparc_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx = active_event_index(cpuc, event);
 
 	if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -1096,7 +1096,7 @@ static void sparc_pmu_stop(struct perf_event *event, int flags)
 
 static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	unsigned long flags;
 	int i;
 
@@ -1133,7 +1133,7 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 
 static void sparc_pmu_read(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx = active_event_index(cpuc, event);
 	struct hw_perf_event *hwc = &event->hw;
 
@@ -1145,7 +1145,7 @@ static DEFINE_MUTEX(pmc_grab_mutex);
 
 static void perf_stop_nmi_watchdog(void *unused)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int i;
 
 	stop_nmi_watchdog(NULL);
@@ -1356,7 +1356,7 @@ static int collect_events(struct perf_event *group, int max_count,
 
 static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int n0, ret = -EAGAIN;
 	unsigned long flags;
 
@@ -1498,7 +1498,7 @@ static int sparc_pmu_event_init(struct perf_event *event)
  */
 static void sparc_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1511,7 +1511,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
  */
 static void sparc_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 	perf_pmu_enable(pmu);
@@ -1524,13 +1524,13 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
  */
 static int sparc_pmu_commit_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int n;
 
 	if (!sparc_pmu)
 		return -EINVAL;
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 	n = cpuc->n_events;
 	if (check_excludes(cpuc->event, 0, n))
 		return -EINVAL;
@@ -1601,7 +1601,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 
 	regs = args->regs;
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
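
For context (not part of the patch): every hunk above is the same mechanical conversion, so here is a minimal kernel-style sketch of the two accessors in isolation. It assumes <linux/percpu.h> and a kernel that still carries the legacy __get_cpu_var() macro; the cpu_hw_events structure is a trivial stand-in for the real one in arch/sparc/kernel/perf_event.c.

	#include <linux/percpu.h>

	struct cpu_hw_events {
		int enabled;			/* stand-in for the real fields */
	};
	static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

	static void accessor_sketch(void)
	{
		/* Old style: __get_cpu_var() yields this CPU's instance as an
		 * lvalue, so the caller immediately takes its address back. */
		struct cpu_hw_events *a = &__get_cpu_var(cpu_hw_events);

		/* New style: this_cpu_ptr() returns the same per-cpu pointer
		 * directly, without the dereference-then-re-address round trip,
		 * and keeps working once __get_cpu_var() is removed. */
		struct cpu_hw_events *b = this_cpu_ptr(&cpu_hw_events);

		(void)a;
		(void)b;			/* both name the same per-cpu slot */
	}

Both expressions resolve to the address of the current CPU's copy of cpu_hw_events, so the conversion is purely syntactic; no locking or preemption behaviour changes.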