perf/core, perf/x86: Change needlessly global functions and a variable to static

Fixes various sparse warnings.

Signed-off-by: Geliang Tang <geliangtang@163.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/70c14234da1bed6e3e67b9c419e2d5e376ab4f32.1443367286.git.geliangtang@163.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Geliang Tang 2015-09-27 23:25:50 +08:00, committed by Ingo Molnar
Parent 6afc0c269c
Commit 18ab2cd3ee
2 changed files with 8 additions and 8 deletions
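For context on the warning class being fixed: sparse flags a non-static symbol when no declaration for it is visible in its translation unit, since such a symbol either is missing a header declaration or, as in this commit, needlessly has external linkage. A minimal sketch of both the warning and the fix (hypothetical file and symbol names, not taken from this commit):

/* sketch.c -- hypothetical translation unit, not from this commit */
unsigned short cache_levels;		/* sparse: "symbol 'cache_levels' was not
					   declared. Should it be static?" */

static unsigned short nr_levels;	/* internal linkage: no warning */

static int query_levels(void)		/* static function: no warning */
{
	return nr_levels;
}

int main(void)
{
	cache_levels = query_levels();
	return cache_levels;
}

In a kernel tree these checks are run with "make C=1" (recheck files being recompiled) or "make C=2" (check all source files regardless).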

View file

@@ -157,7 +157,7 @@ struct _cpuid4_info_regs {
 	struct amd_northbridge *nb;
 };
 
-unsigned short num_cache_leaves;
+static unsigned short num_cache_leaves;
 
 /* AMD doesn't have CPUID4. Emulate it here to report the same
    information to the user. This makes some assumptions about the machine:
@@ -326,7 +326,7 @@ static void amd_calc_l3_indices(struct amd_northbridge *nb)
  *
  * @returns: the disabled index if used or negative value if slot free.
  */
-int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
+static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
 {
 	unsigned int reg = 0;
@@ -403,8 +403,8 @@ static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
  *
  * @return: 0 on success, error status on failure
  */
-int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
-			    unsigned long index)
+static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
+				   unsigned slot, unsigned long index)
 {
 	int ret = 0;
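Dropping external linkage like this is only correct when every caller of the symbol lives in the same file; otherwise the build fails at link time. A runnable sketch of the resulting pattern (hypothetical names, simplified signatures):

/* Helpers lose external linkage; the file's real entry point keeps
 * using them locally, which is what makes 'static' safe here. */
#include <stdio.h>

static int get_l3_slot(unsigned int slot)		/* previously global */
{
	return slot < 2 ? (int)slot : -1;
}

static int set_l3_slot(unsigned int slot, long index)	/* previously global */
{
	return get_l3_slot(slot) < 0 ? -1 : 0;
}

int l3_slot_demo(void)		/* same-file caller of both helpers */
{
	return set_l3_slot(1, 0);
}

int main(void)
{
	printf("set_l3_slot -> %d\n", l3_slot_demo());
	return 0;
}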

View file

@@ -196,7 +196,7 @@ static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
 static int perf_sample_allowed_ns __read_mostly =
 	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
 
-void update_perf_cpu_limits(void)
+static void update_perf_cpu_limits(void)
 {
 	u64 tmp = perf_sample_period_ns;
@@ -472,7 +472,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
  * mode SWOUT : schedule out everything
  * mode SWIN : schedule in based on cgroup for next
  */
-void perf_cgroup_switch(struct task_struct *task, int mode)
+static void perf_cgroup_switch(struct task_struct *task, int mode)
 {
 	struct perf_cpu_context *cpuctx;
 	struct pmu *pmu;
@@ -7390,7 +7390,7 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }
 
-DEFINE_PER_CPU(unsigned int, nop_txn_flags);
+static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
 {
@@ -7750,7 +7750,7 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 	return ret;
 }
 
-struct pmu *perf_init_event(struct perf_event *event)
+static struct pmu *perf_init_event(struct perf_event *event)
 {
 	struct pmu *pmu = NULL;
 	int idx;
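One detail worth noting in the nop_txn_flags hunk: DEFINE_PER_CPU() ultimately expands to an ordinary variable definition (placed in a special per-cpu section), so writing "static" in front of the macro gives the per-cpu variable internal linkage just as it would for a plain variable. A simplified stand-in macro to illustrate the composition (this is not the real kernel macro, which also handles section placement):

#include <stdio.h>

/* Simplified stand-in: expands to a plain variable definition, so a
 * storage-class specifier written before the macro prefixes it. */
#define DEFINE_PER_CPU_SKETCH(type, name) type name

static DEFINE_PER_CPU_SKETCH(unsigned int, nop_txn_flags);
/* expands to: static unsigned int nop_txn_flags; */

int main(void)
{
	nop_txn_flags = 0x1;
	printf("nop_txn_flags = %u\n", nop_txn_flags);
	return 0;
}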