@@ -59,9 +59,9 @@ struct cpu_hw_events {
 	struct perf_branch_entry	bhrb_entries[BHRB_MAX_ENTRIES];
 };
 
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
-struct power_pmu *ppmu;
+static struct power_pmu *ppmu;
 
 /*
  * Normally, to ignore kernel events we set the FCS (freeze counters
@@ -124,7 +124,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
 
 static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
 static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
-void power_pmu_flush_branch_stack(void) {}
+static void power_pmu_flush_branch_stack(void) {}
 static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
 static void pmao_restore_workaround(bool ebb) { }
 #endif /* CONFIG_PPC32 */
@@ -375,7 +375,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
 /* Called from ctxsw to prevent one process's branch entries to
  * mingle with the other process's entries during context switch.
  */
-void power_pmu_flush_branch_stack(void)
+static void power_pmu_flush_branch_stack(void)
 {
 	if (ppmu->bhrb_nr)
 		power_pmu_bhrb_reset();
@@ -408,7 +408,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
 }
 
 /* Processing BHRB entries */
-void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
 {
 	u64 val;
 	u64 addr;
@@ -1573,7 +1573,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-void power_pmu_start_txn(struct pmu *pmu)
+static void power_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
@@ -1587,7 +1587,7 @@ void power_pmu_start_txn(struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-void power_pmu_cancel_txn(struct pmu *pmu)
+static void power_pmu_cancel_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
@@ -1600,7 +1600,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-int power_pmu_commit_txn(struct pmu *pmu)
+static int power_pmu_commit_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	long i, n;
@@ -1888,7 +1888,7 @@ ssize_t power_events_sysfs_show(struct device *dev,
 	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
 }
 
-struct pmu power_pmu = {
+static struct pmu power_pmu = {
 	.pmu_enable	= power_pmu_enable,
 	.pmu_disable	= power_pmu_disable,
 	.event_init	= power_pmu_event_init,