@@ -196,7 +196,7 @@ static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
 static int perf_sample_allowed_ns __read_mostly =
 	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
 
-void update_perf_cpu_limits(void)
+static void update_perf_cpu_limits(void)
 {
 	u64 tmp = perf_sample_period_ns;
 
@@ -472,7 +472,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
  * mode SWOUT : schedule out everything
  * mode SWIN : schedule in based on cgroup for next
  */
-void perf_cgroup_switch(struct task_struct *task, int mode)
+static void perf_cgroup_switch(struct task_struct *task, int mode)
 {
 	struct perf_cpu_context *cpuctx;
 	struct pmu *pmu;
@@ -7390,7 +7390,7 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }
 
-DEFINE_PER_CPU(unsigned int, nop_txn_flags);
+static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
 {
@@ -7750,7 +7750,7 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 	return ret;
 }
 
-struct pmu *perf_init_event(struct perf_event *event)
+static struct pmu *perf_init_event(struct perf_event *event)
 {
 	struct pmu *pmu = NULL;
 	int idx;