Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile: (21 commits)
  OProfile: Fix buffer synchronization for IBS
  oprofile: hotplug cpu fix
  oprofile: fixing whitespaces in arch/x86/oprofile/*
  oprofile: fixing whitespaces in arch/x86/oprofile/*
  oprofile: fixing whitespaces in drivers/oprofile/*
  x86/oprofile: add the logic for enabling additional IBS bits
  x86/oprofile: reordering functions in nmi_int.c
  x86/oprofile: removing unused function parameter in add_ibs_begin()
  oprofile: more whitespace fixes
  oprofile: whitespace fixes
  OProfile: Rename IBS sysfs dir into "ibs_op"
  OProfile: Rework string handling in setup_ibs_files()
  OProfile: Rework oprofile_add_ibs_sample() function
  oprofile: discover counters for op ppro too
  oprofile: Implement Intel architectural perfmon support
  oprofile: Don't report Nehalem as core_2
  oprofile: drop const in num counters field
  Revert "Oprofile Multiplexing Patch"
  x86, oprofile: BUG: using smp_processor_id() in preemptible code
  x86/oprofile: fix on_each_cpu build error
  ...

Manually fixed trivial conflicts in drivers/oprofile/{cpu_buffer.c,event_buffer.h}
Commit: 92fb83afd6
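The headline change in this pull is Intel architectural perfmon support (arch_perfmon_setup_counters() in the diff below sizes the counters from CPUID leaf 0xA). As a hedged illustration only — not part of this merge, and run from user space rather than the kernel — the following sketch reads the same CPUID.0AH:EAX fields (version id, number of general-purpose counters, counter bit width) that the patch decodes via union cpuid10_eax:

/*
 * Illustrative user-space sketch (assumption: x86 with GCC's <cpuid.h>).
 * It only mirrors the field layout used by the kernel patch; it is not
 * the kernel code itself.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0xA not available");
		return 1;
	}

	printf("version id:   %u\n", eax & 0xff);	 /* EAX[7:0]   */
	printf("num counters: %u\n", (eax >> 8) & 0xff); /* EAX[15:8]  */
	printf("bit width:    %u\n", (eax >> 16) & 0xff);/* EAX[23:16] */
	return 0;
}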
@@ -106,7 +106,7 @@ op_axp_stop(void)
 }
 
 static int
-op_axp_create_files(struct super_block * sb, struct dentry * root)
+op_axp_create_files(struct super_block *sb, struct dentry *root)
 {
 	int i;
 
@@ -12,11 +12,11 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 
-extern int perfmon_init(struct oprofile_operations * ops);
+extern int perfmon_init(struct oprofile_operations *ops);
 extern void perfmon_exit(void);
 extern void ia64_backtrace(struct pt_regs * const regs, unsigned int depth);
 
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	int ret = -ENODEV;
 
@@ -56,7 +56,7 @@ static pfm_buffer_fmt_t oprofile_fmt = {
 };
 
 
-static char * get_cpu_type(void)
+static char *get_cpu_type(void)
 {
 	__u8 family = local_cpu_data->family;
 
@@ -75,7 +75,7 @@ static char * get_cpu_type(void)
 
 static int using_perfmon;
 
-int perfmon_init(struct oprofile_operations * ops)
+int perfmon_init(struct oprofile_operations *ops)
 {
 	int ret = pfm_register_buffer_fmt(&oprofile_fmt);
 	if (ret)
@@ -12,7 +12,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	return -ENODEV;
 }
@@ -32,7 +32,7 @@ static int op_mips_setup(void)
 	return 0;
 }
 
-static int op_mips_create_files(struct super_block * sb, struct dentry * root)
+static int op_mips_create_files(struct super_block *sb, struct dentry *root)
 {
 	int i;
 
@@ -27,7 +27,7 @@ struct op_counter_config {
 /* Per-architecture configury and hooks. */
 struct op_mips_model {
 	void (*reg_setup) (struct op_counter_config *);
-	void (*cpu_setup) (void * dummy);
+	void (*cpu_setup) (void *dummy);
 	int (*init)(void);
 	void (*exit)(void);
 	void (*cpu_start)(void *args);
@@ -80,7 +80,7 @@ static void rm9000_cpu_stop(void *args)
 	write_c0_perfcontrol(0);
 }
 
-static irqreturn_t rm9000_perfcount_handler(int irq, void * dev_id)
+static irqreturn_t rm9000_perfcount_handler(int irq, void *dev_id)
 {
 	unsigned int control = read_c0_perfcontrol();
 	struct pt_regs *regs = get_irq_regs();
@@ -12,7 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/oprofile.h>
 
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	return -ENODEV;
 }
@@ -404,7 +404,7 @@ set_count_mode(u32 kernel, u32 user)
 	}
 }
 
-static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl)
+static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
 {
 
 	pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
@@ -12,7 +12,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	return -ENODEV;
 }
@@ -12,7 +12,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 
-int __init oprofile_arch_init(struct oprofile_operations * ops)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	return -ENODEV;
 }
@@ -52,8 +52,7 @@ struct frame_head {
 	unsigned long ret;
 } __attribute__((packed));
 
-static struct frame_head *
-dump_user_backtrace(struct frame_head * head)
+static struct frame_head *dump_user_backtrace(struct frame_head *head)
 {
 	struct frame_head bufhead[2];
 
@@ -28,85 +28,9 @@ static struct op_x86_model_spec const *model;
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
-static int nmi_start(void);
-static void nmi_stop(void);
-static void nmi_cpu_start(void *dummy);
-static void nmi_cpu_stop(void *dummy);
-
 /* 0 == registered but off, 1 == registered and on */
 static int nmi_enabled = 0;
 
-#ifdef CONFIG_SMP
-static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
-				 void *data)
-{
-	int cpu = (unsigned long)data;
-	switch (action) {
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
-		break;
-	case CPU_DOWN_PREPARE:
-		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block oprofile_cpu_nb = {
-	.notifier_call = oprofile_cpu_notifier
-};
-#endif
-
-#ifdef CONFIG_PM
-
-static int nmi_suspend(struct sys_device *dev, pm_message_t state)
-{
-	/* Only one CPU left, just stop that one */
-	if (nmi_enabled == 1)
-		nmi_cpu_stop(NULL);
-	return 0;
-}
-
-static int nmi_resume(struct sys_device *dev)
-{
-	if (nmi_enabled == 1)
-		nmi_cpu_start(NULL);
-	return 0;
-}
-
-static struct sysdev_class oprofile_sysclass = {
-	.name = "oprofile",
-	.resume = nmi_resume,
-	.suspend = nmi_suspend,
-};
-
-static struct sys_device device_oprofile = {
-	.id = 0,
-	.cls = &oprofile_sysclass,
-};
-
-static int __init init_sysfs(void)
-{
-	int error;
-
-	error = sysdev_class_register(&oprofile_sysclass);
-	if (!error)
-		error = sysdev_register(&device_oprofile);
-	return error;
-}
-
-static void exit_sysfs(void)
-{
-	sysdev_unregister(&device_oprofile);
-	sysdev_class_unregister(&oprofile_sysclass);
-}
-
-#else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
-#endif /* CONFIG_PM */
-
 static int profile_exceptions_notify(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -361,6 +285,77 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
 	return 0;
 }
 
+#ifdef CONFIG_SMP
+static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
+				 void *data)
+{
+	int cpu = (unsigned long)data;
+	switch (action) {
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE:
+		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+		break;
+	case CPU_DOWN_PREPARE:
+		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block oprofile_cpu_nb = {
+	.notifier_call = oprofile_cpu_notifier
+};
+#endif
+
+#ifdef CONFIG_PM
+
+static int nmi_suspend(struct sys_device *dev, pm_message_t state)
+{
+	/* Only one CPU left, just stop that one */
+	if (nmi_enabled == 1)
+		nmi_cpu_stop(NULL);
+	return 0;
+}
+
+static int nmi_resume(struct sys_device *dev)
+{
+	if (nmi_enabled == 1)
+		nmi_cpu_start(NULL);
+	return 0;
+}
+
+static struct sysdev_class oprofile_sysclass = {
+	.name = "oprofile",
+	.resume = nmi_resume,
+	.suspend = nmi_suspend,
+};
+
+static struct sys_device device_oprofile = {
+	.id = 0,
+	.cls = &oprofile_sysclass,
+};
+
+static int __init init_sysfs(void)
+{
+	int error;
+
+	error = sysdev_class_register(&oprofile_sysclass);
+	if (!error)
+		error = sysdev_register(&device_oprofile);
+	return error;
+}
+
+static void exit_sysfs(void)
+{
+	sysdev_unregister(&device_oprofile);
+	sysdev_class_unregister(&oprofile_sysclass);
+}
+
+#else
+#define init_sysfs() do { } while (0)
+#define exit_sysfs() do { } while (0)
+#endif /* CONFIG_PM */
+
 static int p4force;
 module_param(p4force, int, 0);
 
@@ -420,9 +415,6 @@ static int __init ppro_init(char **cpu_type)
 	case 15: case 23:
 		*cpu_type = "i386/core_2";
 		break;
-	case 26:
-		*cpu_type = "i386/core_2";
-		break;
 	default:
 		/* Unknown */
 		return 0;
@@ -432,6 +424,16 @@ static int __init ppro_init(char **cpu_type)
 	return 1;
 }
 
+static int __init arch_perfmon_init(char **cpu_type)
+{
+	if (!cpu_has_arch_perfmon)
+		return 0;
+	*cpu_type = "i386/arch_perfmon";
+	model = &op_arch_perfmon_spec;
+	arch_perfmon_setup_counters();
+	return 1;
+}
+
 /* in order to get sysfs right */
 static int using_nmi;
 
@@ -439,7 +441,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 {
 	__u8 vendor = boot_cpu_data.x86_vendor;
 	__u8 family = boot_cpu_data.x86;
-	char *cpu_type;
+	char *cpu_type = NULL;
 	int ret = 0;
 
 	if (!cpu_has_apic)
@@ -477,19 +479,20 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 		switch (family) {
 			/* Pentium IV */
 		case 0xf:
-			if (!p4_init(&cpu_type))
-				return -ENODEV;
+			p4_init(&cpu_type);
 			break;
 
 			/* A P6-class processor */
 		case 6:
-			if (!ppro_init(&cpu_type))
-				return -ENODEV;
+			ppro_init(&cpu_type);
 			break;
 
 		default:
-			return -ENODEV;
+			break;
 		}
+
+		if (!cpu_type && !arch_perfmon_init(&cpu_type))
+			return -ENODEV;
 		break;
 
 	default:
@@ -67,8 +67,9 @@ static unsigned long reset_value[NUM_COUNTERS];
 
 /* The function interface needs to be fixed, something like add
    data. Should then be added to linux/oprofile.h. */
-extern void oprofile_add_ibs_sample(struct pt_regs *const regs,
-			unsigned int * const ibs_sample, u8 code);
+extern void
+oprofile_add_ibs_sample(struct pt_regs *const regs,
+			unsigned int *const ibs_sample, int ibs_code);
 
 struct ibs_fetch_sample {
 	/* MSRC001_1031 IBS Fetch Linear Address Register */
@@ -309,12 +310,15 @@ static void op_amd_start(struct op_msrs const * const msrs)
 #ifdef CONFIG_OPROFILE_IBS
 	if (ibs_allowed && ibs_config.fetch_enabled) {
 		low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
-		high = IBS_FETCH_HIGH_ENABLE;
+		high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
+			+ IBS_FETCH_HIGH_ENABLE;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
 	if (ibs_allowed && ibs_config.op_enabled) {
-		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) + IBS_OP_LOW_ENABLE;
+		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
+			+ ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
+			+ IBS_OP_LOW_ENABLE;
 		high = 0;
 		wrmsr(MSR_AMD64_IBSOPCTL, low, high);
 	}
@@ -468,11 +472,10 @@ static void clear_ibs_nmi(void)
 	on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
 }
 
-static int (*create_arch_files)(struct super_block * sb, struct dentry * root);
+static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
 
-static int setup_ibs_files(struct super_block * sb, struct dentry * root)
+static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 {
-	char buf[12];
 	struct dentry *dir;
 	int ret = 0;
 
@@ -494,16 +497,16 @@ static int setup_ibs_files(struct super_block * sb, struct dentry * root)
 	ibs_config.max_cnt_op = 250000;
 	ibs_config.op_enabled = 0;
 	ibs_config.dispatched_ops = 1;
-	snprintf(buf, sizeof(buf), "ibs_fetch");
-	dir = oprofilefs_mkdir(sb, root, buf);
-	oprofilefs_create_ulong(sb, dir, "rand_enable",
-				&ibs_config.rand_en);
+
+	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
 	oprofilefs_create_ulong(sb, dir, "enable",
 				&ibs_config.fetch_enabled);
 	oprofilefs_create_ulong(sb, dir, "max_count",
 				&ibs_config.max_cnt_fetch);
-	snprintf(buf, sizeof(buf), "ibs_uops");
-	dir = oprofilefs_mkdir(sb, root, buf);
+	oprofilefs_create_ulong(sb, dir, "rand_enable",
+				&ibs_config.rand_en);
+
+	dir = oprofilefs_mkdir(sb, root, "ibs_op");
 	oprofilefs_create_ulong(sb, dir, "enable",
 				&ibs_config.op_enabled);
 	oprofilefs_create_ulong(sb, dir, "max_count",
@@ -1,32 +1,34 @@
 /*
  * @file op_model_ppro.h
- * pentium pro / P6 model-specific MSR operations
+ * Family 6 perfmon and architectural perfmon MSR operations
  *
  * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2008 Intel Corporation
  * @remark Read the file COPYING
  *
  * @author John Levon
 
 * @author Philippe Elie
 
 * @author Graydon Hoare
+ * @author Andi Kleen
 
*/
 
 #include <linux/oprofile.h>
+#include <linux/slab.h>
 #include <asm/ptrace.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
+#include <asm/intel_arch_perfmon.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"
 
-#define NUM_COUNTERS 2
-#define NUM_CONTROLS 2
+static int num_counters = 2;
+static int counter_width = 32;
 
 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
 #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_32BIT_WRITE(l, msrs, c) \
-	do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
+#define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1))))
 
 #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
 #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
@@ -40,20 +42,20 @@
 #define CTRL_SET_UM(val, m) (val |= (m << 8))
 #define CTRL_SET_EVENT(val, e) (val |= e)
 
-static unsigned long reset_value[NUM_COUNTERS];
+static u64 *reset_value;
 
 static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
 	int i;
 
-	for (i = 0; i < NUM_COUNTERS; i++) {
+	for (i = 0; i < num_counters; i++) {
 		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
 			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
 		else
 			msrs->counters[i].addr = 0;
 	}
 
-	for (i = 0; i < NUM_CONTROLS; i++) {
+	for (i = 0; i < num_counters; i++) {
 		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
 			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
 		else
@@ -67,8 +69,22 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value) {
+		reset_value = kmalloc(sizeof(unsigned) * num_counters,
+					GFP_ATOMIC);
+		if (!reset_value)
+			return;
+	}
+
+	if (cpu_has_arch_perfmon) {
+		union cpuid10_eax eax;
+		eax.full = cpuid_eax(0xa);
+		if (counter_width < eax.split.bit_width)
+			counter_width = eax.split.bit_width;
+	}
+
 	/* clear all counters */
-	for (i = 0 ; i < NUM_CONTROLS; ++i) {
+	for (i = 0 ; i < num_counters; ++i) {
 		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
 			continue;
 		CTRL_READ(low, high, msrs, i);
@@ -77,18 +93,18 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 	}
 
 	/* avoid a false detection of ctr overflows in NMI handler */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
 			continue;
-		CTR_32BIT_WRITE(1, msrs, i);
+		wrmsrl(msrs->counters[i].addr, -1LL);
 	}
 
 	/* enable active counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
 			reset_value[i] = counter_config[i].count;
 
-			CTR_32BIT_WRITE(counter_config[i].count, msrs, i);
+			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 
 			CTRL_READ(low, high, msrs, i);
 			CTRL_CLEAR(low);
@@ -111,13 +127,13 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
 	unsigned int low, high;
 	int i;
 
-	for (i = 0 ; i < NUM_COUNTERS; ++i) {
+	for (i = 0 ; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
 		CTR_READ(low, high, msrs, i);
 		if (CTR_OVERFLOWED(low)) {
 			oprofile_add_sample(regs, i);
-			CTR_32BIT_WRITE(reset_value[i], msrs, i);
+			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 		}
 	}
 
@@ -141,7 +157,7 @@ static void ppro_start(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			CTRL_READ(low, high, msrs, i);
 			CTRL_SET_ACTIVE(low);
@@ -156,7 +172,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
 		CTRL_READ(low, high, msrs, i);
@@ -169,20 +185,24 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 {
 	int i;
 
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+	for (i = 0 ; i < num_counters ; ++i) {
 		if (CTR_IS_RESERVED(msrs, i))
 			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
 	}
-	for (i = 0 ; i < NUM_CONTROLS ; ++i) {
+	for (i = 0 ; i < num_counters ; ++i) {
 		if (CTRL_IS_RESERVED(msrs, i))
 			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
 	}
+	if (reset_value) {
+		kfree(reset_value);
+		reset_value = NULL;
+	}
 }
 
 
-struct op_x86_model_spec const op_ppro_spec = {
-	.num_counters = NUM_COUNTERS,
-	.num_controls = NUM_CONTROLS,
+struct op_x86_model_spec op_ppro_spec = {
+	.num_counters = 2,	/* can be overriden */
+	.num_controls = 2,	/* dito */
 	.fill_in_addresses = &ppro_fill_in_addresses,
 	.setup_ctrs = &ppro_setup_ctrs,
 	.check_ctrs = &ppro_check_ctrs,
@@ -190,3 +210,45 @@ struct op_x86_model_spec const op_ppro_spec = {
 	.stop = &ppro_stop,
 	.shutdown = &ppro_shutdown
 };
+
+/*
+ * Architectural performance monitoring.
+ *
+ * Newer Intel CPUs (Core1+) have support for architectural
+ * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
+ * The advantage of this is that it can be done without knowing about
+ * the specific CPU.
+ */
+
+void arch_perfmon_setup_counters(void)
+{
+	union cpuid10_eax eax;
+
+	eax.full = cpuid_eax(0xa);
+
+	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
+	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
+		current_cpu_data.x86_model == 15) {
+		eax.split.version_id = 2;
+		eax.split.num_counters = 2;
+		eax.split.bit_width = 40;
+	}
+
+	num_counters = eax.split.num_counters;
+
+	op_arch_perfmon_spec.num_counters = num_counters;
+	op_arch_perfmon_spec.num_controls = num_counters;
+	op_ppro_spec.num_counters = num_counters;
+	op_ppro_spec.num_controls = num_counters;
+}
+
+struct op_x86_model_spec op_arch_perfmon_spec = {
+	/* num_counters/num_controls filled in at runtime */
+	.fill_in_addresses = &ppro_fill_in_addresses,
+	/* user space does the cpuid check for available events */
+	.setup_ctrs = &ppro_setup_ctrs,
+	.check_ctrs = &ppro_check_ctrs,
+	.start = &ppro_start,
+	.stop = &ppro_stop,
+	.shutdown = &ppro_shutdown
+};
@@ -22,8 +22,8 @@ struct op_msr {
 };
 
 struct op_msrs {
-	struct op_msr * counters;
-	struct op_msr * controls;
+	struct op_msr *counters;
+	struct op_msr *controls;
 };
 
 struct pt_regs;
@@ -34,8 +34,8 @@ struct pt_regs;
 struct op_x86_model_spec {
 	int (*init)(struct oprofile_operations *ops);
 	void (*exit)(void);
-	unsigned int const num_counters;
-	unsigned int const num_controls;
+	unsigned int num_counters;
+	unsigned int num_controls;
 	void (*fill_in_addresses)(struct op_msrs * const msrs);
 	void (*setup_ctrs)(struct op_msrs const * const msrs);
 	int (*check_ctrs)(struct pt_regs * const regs,
@@ -45,9 +45,12 @@ struct op_x86_model_spec {
 	void (*shutdown)(struct op_msrs const * const msrs);
 };
 
-extern struct op_x86_model_spec const op_ppro_spec;
+extern struct op_x86_model_spec op_ppro_spec;
 extern struct op_x86_model_spec const op_p4_spec;
 extern struct op_x86_model_spec const op_p4_ht2_spec;
 extern struct op_x86_model_spec const op_amd_spec;
+extern struct op_x86_model_spec op_arch_perfmon_spec;
+
+extern void arch_perfmon_setup_counters(void);
 
 #endif /* OP_X86_MODEL_H */
@@ -41,7 +41,6 @@ static cpumask_t marked_cpus = CPU_MASK_NONE;
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
-
 /* Take ownership of the task struct and place it on the
  * list for processing. Only after two full buffer syncs
  * does the task eventually get freed, because by then
@@ -341,7 +340,7 @@ static void add_trace_begin(void)
  * Add IBS fetch and op entries to event buffer
  */
 static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
-	int in_kernel, struct mm_struct *mm)
+			  struct mm_struct *mm)
 {
 	unsigned long rip;
 	int i, count;
@@ -565,9 +564,11 @@ void sync_buffer(int cpu)
 	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
-	unsigned int i;
 	sync_buffer_state state = sb_buffer_start;
+#ifndef CONFIG_OPROFILE_IBS
+	unsigned int i;
 	unsigned long available;
+#endif
 
 	mutex_lock(&buffer_mutex);
 
@@ -575,9 +576,13 @@ void sync_buffer(int cpu)
 
 	/* Remember, only we can modify tail_pos */
 
+#ifndef CONFIG_OPROFILE_IBS
 	available = get_slots(cpu_buf);
 
 	for (i = 0; i < available; ++i) {
+#else
+	while (get_slots(cpu_buf)) {
+#endif
 		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
 
 		if (is_code(s->eip)) {
@@ -593,12 +598,10 @@ void sync_buffer(int cpu)
 #ifdef CONFIG_OPROFILE_IBS
 			} else if (s->event == IBS_FETCH_BEGIN) {
 				state = sb_bt_start;
-				add_ibs_begin(cpu_buf,
-					IBS_FETCH_CODE, in_kernel, mm);
+				add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm);
 			} else if (s->event == IBS_OP_BEGIN) {
 				state = sb_bt_start;
-				add_ibs_begin(cpu_buf,
-					IBS_OP_CODE, in_kernel, mm);
+				add_ibs_begin(cpu_buf, IBS_OP_CODE, mm);
 #endif
 			} else {
 				struct mm_struct *oldmm = mm;
@@ -39,7 +39,7 @@ void free_cpu_buffers(void)
 {
 	int i;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		vfree(per_cpu(cpu_buffer, i).buffer);
 		per_cpu(cpu_buffer, i).buffer = NULL;
 	}
@@ -64,7 +64,7 @@ int alloc_cpu_buffers(void)
 
 	unsigned long buffer_size = fs_cpu_buffer_size;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
 		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
@@ -125,7 +125,7 @@ void end_cpu_work(void)
 }
 
 /* Resets the cpu buffer to a sane state. */
-void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
+void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
 {
 	/* reset these to invalid values; the next sample
 	 * collected will populate the buffer with proper
@@ -136,7 +136,7 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
 }
 
 /* compute number of available slots in cpu_buffer queue */
-static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
+static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
 {
 	unsigned long head = b->head_pos;
 	unsigned long tail = b->tail_pos;
@@ -147,7 +147,7 @@ static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
 	return tail + (b->buffer_size - head) - 1;
 }
 
-static void increment_head(struct oprofile_cpu_buffer * b)
+static void increment_head(struct oprofile_cpu_buffer *b)
 {
 	unsigned long new_head = b->head_pos + 1;
 
@@ -162,17 +162,17 @@ static void increment_head(struct oprofile_cpu_buffer * b)
 }
 
 static inline void
-add_sample(struct oprofile_cpu_buffer * cpu_buf,
+add_sample(struct oprofile_cpu_buffer *cpu_buf,
 	   unsigned long pc, unsigned long event)
 {
-	struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
+	struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
 	entry->eip = pc;
 	entry->event = event;
 	increment_head(cpu_buf);
 }
 
 static inline void
-add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
+add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
 {
 	add_sample(buffer, ESCAPE_CODE, value);
 }
@@ -186,10 +186,10 @@ add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
-static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
+static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
 		      int is_kernel, unsigned long event)
 {
-	struct task_struct * task;
+	struct task_struct *task;
 
 	cpu_buf->sample_received++;
 
@@ -235,7 +235,7 @@ static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
 	return 1;
 }
 
-static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
+static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
 	cpu_buf->tracing = 0;
 }
@@ -271,20 +271,22 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 #ifdef CONFIG_OPROFILE_IBS
 
 #define MAX_IBS_SAMPLE_SIZE 14
-static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
-	unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
+void oprofile_add_ibs_sample(struct pt_regs *const regs,
+			     unsigned int *const ibs_sample, int ibs_code)
 {
+	int is_kernel = !user_mode(regs);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 	struct task_struct *task;
 
 	cpu_buf->sample_received++;
 
 	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
+		/* we can't backtrace since we lost the source of this event */
 		cpu_buf->sample_lost_overflow++;
-		return 0;
+		return;
 	}
 
-	is_kernel = !!is_kernel;
 
 	/* notice a switch from user->kernel or vice versa */
 	if (cpu_buf->last_is_kernel != is_kernel) {
 		cpu_buf->last_is_kernel = is_kernel;
@@ -294,7 +296,6 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
 	/* notice a task switch */
 	if (!is_kernel) {
 		task = current;
-
 		if (cpu_buf->last_task != task) {
 			cpu_buf->last_task = task;
 			add_code(cpu_buf, (unsigned long)task);
@@ -302,36 +303,17 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
 	}
 
 	add_code(cpu_buf, ibs_code);
-	add_sample(cpu_buf, ibs[0], ibs[1]);
-	add_sample(cpu_buf, ibs[2], ibs[3]);
-	add_sample(cpu_buf, ibs[4], ibs[5]);
+	add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
+	add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
+	add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
 
 	if (ibs_code == IBS_OP_BEGIN) {
-		add_sample(cpu_buf, ibs[6], ibs[7]);
-		add_sample(cpu_buf, ibs[8], ibs[9]);
-		add_sample(cpu_buf, ibs[10], ibs[11]);
+		add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
+		add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
+		add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
 	}
 
-	return 1;
-}
-
-void oprofile_add_ibs_sample(struct pt_regs *const regs,
-				unsigned int * const ibs_sample, u8 code)
-{
-	int is_kernel = !user_mode(regs);
-	unsigned long pc = profile_pc(regs);
-
-	struct oprofile_cpu_buffer *cpu_buf =
-			&per_cpu(cpu_buffer, smp_processor_id());
-
-	if (!backtrace_depth) {
-		log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
-		return;
-	}
-
-	/* if log_sample() fails we can't backtrace since we lost the source
-	 * of this event */
-	if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
+	if (backtrace_depth)
 		oprofile_ops.backtrace(regs, backtrace_depth);
 }
 
@@ -376,11 +358,16 @@ void oprofile_add_trace(unsigned long pc)
 */
 static void wq_sync_buffer(struct work_struct *work)
 {
-	struct oprofile_cpu_buffer * b =
+	struct oprofile_cpu_buffer *b =
 		container_of(work, struct oprofile_cpu_buffer, work.work);
 	if (b->cpu != smp_processor_id()) {
 		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
 		       smp_processor_id(), b->cpu);
+
+		if (!cpu_online(b->cpu)) {
+			cancel_delayed_work(&b->work);
+			return;
+		}
 	}
 	sync_buffer(b->cpu);
 
@@ -36,10 +36,10 @@ struct oprofile_cpu_buffer {
 	volatile unsigned long head_pos;
 	volatile unsigned long tail_pos;
 	unsigned long buffer_size;
-	struct task_struct * last_task;
+	struct task_struct *last_task;
 	int last_is_kernel;
 	int tracing;
-	struct op_sample * buffer;
+	struct op_sample *buffer;
 	unsigned long sample_received;
 	unsigned long sample_lost_overflow;
 	unsigned long backtrace_aborted;
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
 
 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
-void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
+void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
 
 /* transient events for the CPU buffer -> event buffer */
 #define CPU_IS_KERNEL 1
@@ -28,7 +28,7 @@ DEFINE_MUTEX(buffer_mutex);
 
 static unsigned long buffer_opened;
 static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
-static unsigned long * event_buffer;
+static unsigned long *event_buffer;
 static unsigned long buffer_size;
 static unsigned long buffer_watershed;
 static size_t buffer_pos;
@@ -98,7 +98,7 @@ void free_event_buffer(void)
 }
 
 
-static int event_buffer_open(struct inode * inode, struct file * file)
+static int event_buffer_open(struct inode *inode, struct file *file)
 {
 	int err = -EPERM;
 
@@ -134,7 +134,7 @@ out:
 }
 
 
-static int event_buffer_release(struct inode * inode, struct file * file)
+static int event_buffer_release(struct inode *inode, struct file *file)
 {
 	oprofile_stop();
 	oprofile_shutdown();
@@ -146,8 +146,8 @@ static int event_buffer_release(struct inode * inode, struct file * file)
 }
 
 
-static ssize_t event_buffer_read(struct file * file, char __user * buf,
-				size_t count, loff_t * offset)
+static ssize_t event_buffer_read(struct file *file, char __user *buf,
+				size_t count, loff_t *offset)
 {
 	int retval = -EINVAL;
 	size_t const max = buffer_size * sizeof(unsigned long);
@@ -31,8 +31,8 @@ extern unsigned long backtrace_depth;
 struct super_block;
 struct dentry;
 
-void oprofile_create_files(struct super_block * sb, struct dentry * root);
-void oprofile_timer_init(struct oprofile_operations * ops);
+void oprofile_create_files(struct super_block *sb, struct dentry *root);
+void oprofile_timer_init(struct oprofile_operations *ops);
 
 int oprofile_set_backtrace(unsigned long depth);
 
@@ -18,13 +18,13 @@ unsigned long fs_buffer_size = 131072;
 unsigned long fs_cpu_buffer_size = 8192;
 unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
 
-static ssize_t depth_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
 	return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
 }
 
 
-static ssize_t depth_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
 	unsigned long val;
 	int retval;
@@ -50,7 +50,7 @@ static const struct file_operations depth_fops = {
 };
 
 
-static ssize_t pointer_size_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
 	return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
 }
@@ -61,7 +61,7 @@ static const struct file_operations pointer_size_fops = {
 };
 
 
-static ssize_t cpu_type_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
 	return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
 }
@@ -72,13 +72,13 @@ static const struct file_operations cpu_type_fops = {
 };
 
 
-static ssize_t enable_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
 	return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
 }
 
 
-static ssize_t enable_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
 	unsigned long val;
 	int retval;
@@ -107,7 +107,7 @@ static const struct file_operations enable_fops = {
 };
 
 
-static ssize_t dump_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
 	wake_up_buffer_waiter();
 	return count;
@@ -118,7 +118,7 @@ static const struct file_operations dump_fops = {
 	.write = dump_write,
 };
 
-void oprofile_create_files(struct super_block * sb, struct dentry * root)
+void oprofile_create_files(struct super_block *sb, struct dentry *root)
 {
 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
@@ -19,7 +19,7 @@ struct oprofile_stat_struct oprofile_stats;
 
 void oprofile_reset_stats(void)
 {
-	struct oprofile_cpu_buffer * cpu_buf;
+	struct oprofile_cpu_buffer *cpu_buf;
 	int i;
 
 	for_each_possible_cpu(i) {
@@ -36,11 +36,11 @@ void oprofile_reset_stats(void)
 }
 
 
-void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
+void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
 {
-	struct oprofile_cpu_buffer * cpu_buf;
-	struct dentry * cpudir;
-	struct dentry * dir;
+	struct oprofile_cpu_buffer *cpu_buf;
+	struct dentry *cpudir;
+	struct dentry *dir;
 	char buf[10];
 	int i;
 
@@ -28,6 +28,6 @@ struct super_block;
 struct dentry;
 
 /* create the stats/ dir */
-void oprofile_create_stats_files(struct super_block * sb, struct dentry * root);
+void oprofile_create_stats_files(struct super_block *sb, struct dentry *root);
 
 #endif /* OPROFILE_STATS_H */
@@ -23,9 +23,9 @@
 
 DEFINE_SPINLOCK(oprofilefs_lock);
 
-static struct inode * oprofilefs_get_inode(struct super_block * sb, int mode)
+static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
 {
-	struct inode * inode = new_inode(sb);
+	struct inode *inode = new_inode(sb);
 
 	if (inode) {
 		inode->i_mode = mode;
@@ -44,7 +44,7 @@ static struct super_operations s_ops = {
 };
 
 
-ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset)
+ssize_t oprofilefs_str_to_user(char const *str, char __user *buf, size_t count, loff_t *offset)
 {
 	return simple_read_from_buffer(buf, count, offset, str, strlen(str));
 }
@@ -52,7 +52,7 @@ ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count
 
 #define TMPBUFSIZE 50
 
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset)
+ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
 {
 	char tmpbuf[TMPBUFSIZE];
 	size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
@@ -62,7 +62,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
 }
 
 
-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count)
+int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 {
 	char tmpbuf[TMPBUFSIZE];
 	unsigned long flags;
@@ -85,16 +85,16 @@ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, siz
 }
 
 
-static ssize_t ulong_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	unsigned long * val = file->private_data;
+	unsigned long *val = file->private_data;
 	return oprofilefs_ulong_to_user(*val, buf, count, offset);
 }
 
 
-static ssize_t ulong_write_file(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
-	unsigned long * value = file->private_data;
+	unsigned long *value = file->private_data;
 	int retval;
 
 	if (*offset)
@@ -108,7 +108,7 @@ static ssize_t ulong_write_file(struct file * file, char const __user * buf, siz
 }
 
 
-static int default_open(struct inode * inode, struct file * filp)
+static int default_open(struct inode *inode, struct file *filp)
 {
 	if (inode->i_private)
 		filp->private_data = inode->i_private;
@@ -129,12 +129,12 @@ static const struct file_operations ulong_ro_fops = {
 };
 
 
-static struct dentry * __oprofilefs_create_file(struct super_block * sb,
-	struct dentry * root, char const * name, const struct file_operations * fops,
+static struct dentry *__oprofilefs_create_file(struct super_block *sb,
+	struct dentry *root, char const *name, const struct file_operations *fops,
 	int perm)
 {
-	struct dentry * dentry;
-	struct inode * inode;
+	struct dentry *dentry;
+	struct inode *inode;
 
 	dentry = d_alloc_name(root, name);
 	if (!dentry)
@@ -150,10 +150,10 @@ static struct dentry * __oprofilefs_create_file(struct super_block * sb,
 }
 
 
-int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
-	char const * name, unsigned long * val)
+int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
+	char const *name, unsigned long *val)
 {
-	struct dentry * d = __oprofilefs_create_file(sb, root, name,
+	struct dentry *d = __oprofilefs_create_file(sb, root, name,
 						     &ulong_fops, 0644);
 	if (!d)
 		return -EFAULT;
@@ -163,10 +163,10 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
 }
 
 
-int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
-	char const * name, unsigned long * val)
+int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+	char const *name, unsigned long *val)
 {
-	struct dentry * d = __oprofilefs_create_file(sb, root, name,
+	struct dentry *d = __oprofilefs_create_file(sb, root, name,
 						     &ulong_ro_fops, 0444);
 	if (!d)
 		return -EFAULT;
@@ -176,9 +176,9 @@ int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
 }
 
 
-static ssize_t atomic_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	atomic_t * val = file->private_data;
+	atomic_t *val = file->private_data;
 	return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
 }
 
@@ -189,10 +189,10 @@ static const struct file_operations atomic_ro_fops = {
 };
 
 
-int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
-	char const * name, atomic_t * val)
+int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+	char const *name, atomic_t *val)
 {
-	struct dentry * d = __oprofilefs_create_file(sb, root, name,
+	struct dentry *d = __oprofilefs_create_file(sb, root, name,
 						     &atomic_ro_fops, 0444);
 	if (!d)
 		return -EFAULT;
@@ -202,8 +202,8 @@ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
 }
 
 
-int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
-	char const * name, const struct file_operations * fops)
+int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
+	char const *name, const struct file_operations *fops)
 {
 	if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
 		return -EFAULT;
@@ -211,8 +211,8 @@ int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
 }
 
 
-int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
-	char const * name, const struct file_operations * fops, int perm)
+int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
+	char const *name, const struct file_operations *fops, int perm)
 {
 	if (!__oprofilefs_create_file(sb, root, name, fops, perm))
 		return -EFAULT;
@@ -220,11 +220,11 @@ int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
 }
 
 
-struct dentry * oprofilefs_mkdir(struct super_block * sb,
-	struct dentry * root, char const * name)
+struct dentry *oprofilefs_mkdir(struct super_block *sb,
+	struct dentry *root, char const *name)
 {
-	struct dentry * dentry;
-	struct inode * inode;
+	struct dentry *dentry;
+	struct inode *inode;
 
 	dentry = d_alloc_name(root, name);
 	if (!dentry)
@@ -241,10 +241,10 @@ struct dentry * oprofilefs_mkdir(struct super_block * sb,
 }
 
 
-static int oprofilefs_fill_super(struct super_block * sb, void * data, int silent)
+static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
 {
-	struct inode * root_inode;
-	struct dentry * root_dentry;
+	struct inode *root_inode;
+	struct dentry *root_dentry;
 
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -35,7 +35,7 @@ static void timer_stop(void)
 }
 
 
-void __init oprofile_timer_init(struct oprofile_operations * ops)
+void __init oprofile_timer_init(struct oprofile_operations *ops)
 {
 	ops->create_files = NULL;
 	ops->setup = NULL;