Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile: (21 commits)
  OProfile: Fix buffer synchronization for IBS
  oprofile: hotplug cpu fix
  oprofile: fixing whitespaces in arch/x86/oprofile/*
  oprofile: fixing whitespaces in arch/x86/oprofile/*
  oprofile: fixing whitespaces in drivers/oprofile/*
  x86/oprofile: add the logic for enabling additional IBS bits
  x86/oprofile: reordering functions in nmi_int.c
  x86/oprofile: removing unused function parameter in add_ibs_begin()
  oprofile: more whitespace fixes
  oprofile: whitespace fixes
  OProfile: Rename IBS sysfs dir into "ibs_op"
  OProfile: Rework string handling in setup_ibs_files()
  OProfile: Rework oprofile_add_ibs_sample() function
  oprofile: discover counters for op ppro too
  oprofile: Implement Intel architectural perfmon support
  oprofile: Don't report Nehalem as core_2
  oprofile: drop const in num counters field
  Revert "Oprofile Multiplexing Patch"
  x86, oprofile: BUG: using smp_processor_id() in preemptible code
  x86/oprofile: fix on_each_cpu build error
  ...

Manually fixed trivial conflicts in
	drivers/oprofile/{cpu_buffer.c,event_buffer.h}
Linus Torvalds committed on 2008-10-23 10:05:40 -07:00
Parents: a534487606 df13b31c28
Commit: 92fb83afd6
31 changed files with 431 additions and 371 deletions

arch/x86/oprofile/backtrace.c

@@ -52,8 +52,7 @@ struct frame_head {
 	unsigned long ret;
 } __attribute__((packed));
 
-static struct frame_head *
-dump_user_backtrace(struct frame_head * head)
+static struct frame_head *dump_user_backtrace(struct frame_head *head)
 {
 	struct frame_head bufhead[2];

arch/x86/oprofile/nmi_int.c

@@ -28,85 +28,9 @@ static struct op_x86_model_spec const *model;
 static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
 static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
-static int nmi_start(void);
-static void nmi_stop(void);
-static void nmi_cpu_start(void *dummy);
-static void nmi_cpu_stop(void *dummy);
-
 /* 0 == registered but off, 1 == registered and on */
 static int nmi_enabled = 0;
 
-#ifdef CONFIG_SMP
-static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
-				 void *data)
-{
-	int cpu = (unsigned long)data;
-	switch (action) {
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
-		break;
-	case CPU_DOWN_PREPARE:
-		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block oprofile_cpu_nb = {
-	.notifier_call = oprofile_cpu_notifier
-};
-#endif
-
-#ifdef CONFIG_PM
-
-static int nmi_suspend(struct sys_device *dev, pm_message_t state)
-{
-	/* Only one CPU left, just stop that one */
-	if (nmi_enabled == 1)
-		nmi_cpu_stop(NULL);
-	return 0;
-}
-
-static int nmi_resume(struct sys_device *dev)
-{
-	if (nmi_enabled == 1)
-		nmi_cpu_start(NULL);
-	return 0;
-}
-
-static struct sysdev_class oprofile_sysclass = {
-	.name		= "oprofile",
-	.resume		= nmi_resume,
-	.suspend	= nmi_suspend,
-};
-
-static struct sys_device device_oprofile = {
-	.id	= 0,
-	.cls	= &oprofile_sysclass,
-};
-
-static int __init init_sysfs(void)
-{
-	int error;
-
-	error = sysdev_class_register(&oprofile_sysclass);
-	if (!error)
-		error = sysdev_register(&device_oprofile);
-	return error;
-}
-
-static void exit_sysfs(void)
-{
-	sysdev_unregister(&device_oprofile);
-	sysdev_class_unregister(&oprofile_sysclass);
-}
-
-#else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
-#endif /* CONFIG_PM */
-
 static int profile_exceptions_notify(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -361,6 +285,77 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
 	return 0;
 }
 
+#ifdef CONFIG_SMP
+static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
+				 void *data)
+{
+	int cpu = (unsigned long)data;
+	switch (action) {
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE:
+		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+		break;
+	case CPU_DOWN_PREPARE:
+		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block oprofile_cpu_nb = {
+	.notifier_call = oprofile_cpu_notifier
+};
+#endif
+
+#ifdef CONFIG_PM
+
+static int nmi_suspend(struct sys_device *dev, pm_message_t state)
+{
+	/* Only one CPU left, just stop that one */
+	if (nmi_enabled == 1)
+		nmi_cpu_stop(NULL);
+	return 0;
+}
+
+static int nmi_resume(struct sys_device *dev)
+{
+	if (nmi_enabled == 1)
+		nmi_cpu_start(NULL);
+	return 0;
+}
+
+static struct sysdev_class oprofile_sysclass = {
+	.name		= "oprofile",
+	.resume		= nmi_resume,
+	.suspend	= nmi_suspend,
+};
+
+static struct sys_device device_oprofile = {
+	.id	= 0,
+	.cls	= &oprofile_sysclass,
+};
+
+static int __init init_sysfs(void)
+{
+	int error;
+
+	error = sysdev_class_register(&oprofile_sysclass);
+	if (!error)
+		error = sysdev_register(&device_oprofile);
+	return error;
+}
+
+static void exit_sysfs(void)
+{
+	sysdev_unregister(&device_oprofile);
+	sysdev_class_unregister(&oprofile_sysclass);
+}
+
+#else
+#define init_sysfs() do { } while (0)
+#define exit_sysfs() do { } while (0)
+#endif /* CONFIG_PM */
+
 static int p4force;
 module_param(p4force, int, 0);
 
@@ -420,9 +415,6 @@ static int __init ppro_init(char **cpu_type)
 	case 15: case 23:
 		*cpu_type = "i386/core_2";
 		break;
-	case 26:
-		*cpu_type = "i386/core_2";
-		break;
 	default:
 		/* Unknown */
 		return 0;
@@ -432,6 +424,16 @@ static int __init ppro_init(char **cpu_type)
 	return 1;
 }
 
+static int __init arch_perfmon_init(char **cpu_type)
+{
+	if (!cpu_has_arch_perfmon)
+		return 0;
+	*cpu_type = "i386/arch_perfmon";
+	model = &op_arch_perfmon_spec;
+	arch_perfmon_setup_counters();
+	return 1;
+}
+
 /* in order to get sysfs right */
 static int using_nmi;
 
@@ -439,7 +441,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 {
 	__u8 vendor = boot_cpu_data.x86_vendor;
 	__u8 family = boot_cpu_data.x86;
-	char *cpu_type;
+	char *cpu_type = NULL;
 	int ret = 0;
 
 	if (!cpu_has_apic)
@@ -477,19 +479,20 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 		switch (family) {
 			/* Pentium IV */
 		case 0xf:
-			if (!p4_init(&cpu_type))
-				return -ENODEV;
+			p4_init(&cpu_type);
 			break;
 
 			/* A P6-class processor */
 		case 6:
-			if (!ppro_init(&cpu_type))
-				return -ENODEV;
+			ppro_init(&cpu_type);
 			break;
 
 		default:
-			return -ENODEV;
+			break;
 		}
+
+		if (!cpu_type && !arch_perfmon_init(&cpu_type))
+			return -ENODEV;
 		break;
 
 	default:
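
Note: the nmi_int.c hunks above do three things: they move the CONFIG_SMP
hotplug notifier and the CONFIG_PM sysdev glue below the functions they
call (dropping the forward declarations), they stop reporting Nehalem
(model 26) as "i386/core_2", and they let unrecognized Intel CPUs fall
back to the new arch_perfmon_init() instead of returning -ENODEV. The
register/unregister call sites for oprofile_cpu_nb are not part of this
diff; a minimal sketch of how such a notifier block is typically wired
up (setup_example and shutdown_example are hypothetical names):

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int setup_example(void)
	{
		/* start receiving CPU_ONLINE/CPU_DOWN_PREPARE callbacks */
		return register_cpu_notifier(&oprofile_cpu_nb);
	}

	static void shutdown_example(void)
	{
		unregister_cpu_notifier(&oprofile_cpu_nb);
	}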

arch/x86/oprofile/op_model_amd.c

@@ -67,8 +67,9 @@ static unsigned long reset_value[NUM_COUNTERS];
 
 /* The function interface needs to be fixed, something like add
    data. Should then be added to linux/oprofile.h. */
-extern void oprofile_add_ibs_sample(struct pt_regs *const regs,
-				    unsigned int * const ibs_sample, u8 code);
+extern void
+oprofile_add_ibs_sample(struct pt_regs *const regs,
+			unsigned int *const ibs_sample, int ibs_code);
 
 struct ibs_fetch_sample {
 	/* MSRC001_1031 IBS Fetch Linear Address Register */
@@ -309,12 +310,15 @@ static void op_amd_start(struct op_msrs const * const msrs)
 #ifdef CONFIG_OPROFILE_IBS
 	if (ibs_allowed && ibs_config.fetch_enabled) {
 		low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
-		high = IBS_FETCH_HIGH_ENABLE;
+		high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
+			+ IBS_FETCH_HIGH_ENABLE;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
 	if (ibs_allowed && ibs_config.op_enabled) {
-		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) + IBS_OP_LOW_ENABLE;
+		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
+			+ ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
+			+ IBS_OP_LOW_ENABLE;
 		high = 0;
 		wrmsr(MSR_AMD64_IBSOPCTL, low, high);
 	}
@@ -472,7 +476,6 @@ static int (*create_arch_files)(struct super_block * sb, struct dentry * root);
 
 static int setup_ibs_files(struct super_block *sb, struct dentry *root)
 {
-	char buf[12];
 	struct dentry *dir;
 	int ret = 0;
 
@@ -494,16 +497,16 @@ static int setup_ibs_files(struct super_block * sb, struct dentry * root)
 	ibs_config.max_cnt_op = 250000;
 	ibs_config.op_enabled = 0;
 	ibs_config.dispatched_ops = 1;
-	snprintf(buf, sizeof(buf), "ibs_fetch");
-	dir = oprofilefs_mkdir(sb, root, buf);
-	oprofilefs_create_ulong(sb, dir, "rand_enable",
-				&ibs_config.rand_en);
+
+	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
 	oprofilefs_create_ulong(sb, dir, "enable",
 				&ibs_config.fetch_enabled);
 	oprofilefs_create_ulong(sb, dir, "max_count",
 				&ibs_config.max_cnt_fetch);
-	snprintf(buf, sizeof(buf), "ibs_uops");
-	dir = oprofilefs_mkdir(sb, root, buf);
+	oprofilefs_create_ulong(sb, dir, "rand_enable",
+				&ibs_config.rand_en);
+
+	dir = oprofilefs_mkdir(sb, root, "ibs_op");
 	oprofilefs_create_ulong(sb, dir, "enable",
 				&ibs_config.op_enabled);
 	oprofilefs_create_ulong(sb, dir, "max_count",
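
Note: the IBS control MSRs are 64 bits wide but written through wrmsr()
as two 32-bit halves, which is why the randomization-enable bit (MSR
bit 57, per the "bit 57" comment above) lands at bit 57 - 32 = 25 of
the high word, while the dispatched-ops select (bit 19) stays in the
low word. A hedged sketch of the same computation, using the macros
from this file; ibs_fetch_example is a hypothetical helper:

	static void ibs_fetch_example(unsigned long max_cnt, unsigned long rand_en)
	{
		unsigned int low, high;

		low  = (max_cnt >> 4) & 0xFFFF;	/* max-count field, bits 15:0 */
		high = ((rand_en & 0x1) << 25)	/* randomization enable, MSR bit 57 */
			+ IBS_FETCH_HIGH_ENABLE;	/* fetch-enable bit in the high word */
		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
	}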

arch/x86/oprofile/op_model_ppro.c

@@ -1,32 +1,34 @@
 /*
  * @file op_model_ppro.h
- * pentium pro / P6 model-specific MSR operations
+ * Family 6 perfmon and architectural perfmon MSR operations
  *
  * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2008 Intel Corporation
  * @remark Read the file COPYING
  *
  * @author John Levon
  * @author Philippe Elie
  * @author Graydon Hoare
+ * @author Andi Kleen
  */
 
 #include <linux/oprofile.h>
+#include <linux/slab.h>
 #include <asm/ptrace.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
+#include <asm/intel_arch_perfmon.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"
 
-#define NUM_COUNTERS 2
-#define NUM_CONTROLS 2
+static int num_counters = 2;
+static int counter_width = 32;
 
 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
 #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_32BIT_WRITE(l, msrs, c)	\
-	do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
+#define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1))))
 
 #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
 #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
@@ -40,20 +42,20 @@
 #define CTRL_SET_UM(val, m) (val |= (m << 8))
 #define CTRL_SET_EVENT(val, e) (val |= e)
 
-static unsigned long reset_value[NUM_COUNTERS];
+static u64 *reset_value;
 
 static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
 	int i;
 
-	for (i = 0; i < NUM_COUNTERS; i++) {
+	for (i = 0; i < num_counters; i++) {
 		if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
 			msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
 		else
 			msrs->counters[i].addr = 0;
 	}
 
-	for (i = 0; i < NUM_CONTROLS; i++) {
+	for (i = 0; i < num_counters; i++) {
 		if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
 			msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
 		else
@@ -67,8 +69,22 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value) {
+		reset_value = kmalloc(sizeof(unsigned) * num_counters,
+					GFP_ATOMIC);
+		if (!reset_value)
+			return;
+	}
+
+	if (cpu_has_arch_perfmon) {
+		union cpuid10_eax eax;
+		eax.full = cpuid_eax(0xa);
+		if (counter_width < eax.split.bit_width)
+			counter_width = eax.split.bit_width;
+	}
+
 	/* clear all counters */
-	for (i = 0 ; i < NUM_CONTROLS; ++i) {
+	for (i = 0 ; i < num_counters; ++i) {
 		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
 			continue;
 		CTRL_READ(low, high, msrs, i);
@@ -77,18 +93,18 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 	}
 
 	/* avoid a false detection of ctr overflows in NMI handler */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
 			continue;
-		CTR_32BIT_WRITE(1, msrs, i);
+		wrmsrl(msrs->counters[i].addr, -1LL);
 	}
 
 	/* enable active counters */
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
 			reset_value[i] = counter_config[i].count;
-			CTR_32BIT_WRITE(counter_config[i].count, msrs, i);
+			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 
 			CTRL_READ(low, high, msrs, i);
 			CTRL_CLEAR(low);
@@ -111,13 +127,13 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
 	unsigned int low, high;
 	int i;
 
-	for (i = 0 ; i < NUM_COUNTERS; ++i) {
+	for (i = 0 ; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
 		CTR_READ(low, high, msrs, i);
 		if (CTR_OVERFLOWED(low)) {
 			oprofile_add_sample(regs, i);
-			CTR_32BIT_WRITE(reset_value[i], msrs, i);
+			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 		}
 	}
 
@@ -141,7 +157,7 @@ static void ppro_start(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			CTRL_READ(low, high, msrs, i);
 			CTRL_SET_ACTIVE(low);
@@ -156,7 +172,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;
 
-	for (i = 0; i < NUM_COUNTERS; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
 		CTRL_READ(low, high, msrs, i);
@@ -169,20 +185,24 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 {
 	int i;
 
-	for (i = 0 ; i < NUM_COUNTERS ; ++i) {
+	for (i = 0 ; i < num_counters ; ++i) {
 		if (CTR_IS_RESERVED(msrs, i))
 			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
 	}
-	for (i = 0 ; i < NUM_CONTROLS ; ++i) {
+	for (i = 0 ; i < num_counters ; ++i) {
 		if (CTRL_IS_RESERVED(msrs, i))
 			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
 	}
+	if (reset_value) {
+		kfree(reset_value);
+		reset_value = NULL;
+	}
 }
 
-struct op_x86_model_spec const op_ppro_spec = {
-	.num_counters = NUM_COUNTERS,
-	.num_controls = NUM_CONTROLS,
+struct op_x86_model_spec op_ppro_spec = {
+	.num_counters = 2,	/* can be overriden */
+	.num_controls = 2,	/* dito */
 	.fill_in_addresses = &ppro_fill_in_addresses,
 	.setup_ctrs = &ppro_setup_ctrs,
 	.check_ctrs = &ppro_check_ctrs,
@@ -190,3 +210,45 @@ struct op_x86_model_spec const op_ppro_spec = {
 	.stop = &ppro_stop,
 	.shutdown = &ppro_shutdown
 };
+
+/*
+ * Architectural performance monitoring.
+ *
+ * Newer Intel CPUs (Core1+) have support for architectural
+ * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
+ * The advantage of this is that it can be done without knowing about
+ * the specific CPU.
+ */
+
+void arch_perfmon_setup_counters(void)
+{
+	union cpuid10_eax eax;
+
+	eax.full = cpuid_eax(0xa);
+
+	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
+	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
+		current_cpu_data.x86_model == 15) {
+		eax.split.version_id = 2;
+		eax.split.num_counters = 2;
+		eax.split.bit_width = 40;
+	}
+
+	num_counters = eax.split.num_counters;
+
+	op_arch_perfmon_spec.num_counters = num_counters;
+	op_arch_perfmon_spec.num_controls = num_counters;
+	op_ppro_spec.num_counters = num_counters;
+	op_ppro_spec.num_controls = num_counters;
+}
+
+struct op_x86_model_spec op_arch_perfmon_spec = {
+	/* num_counters/num_controls filled in at runtime */
+	.fill_in_addresses = &ppro_fill_in_addresses,
+	/* user space does the cpuid check for available events */
+	.setup_ctrs = &ppro_setup_ctrs,
+	.check_ctrs = &ppro_check_ctrs,
+	.start = &ppro_start,
+	.stop = &ppro_stop,
+	.shutdown = &ppro_shutdown
+};
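
Note: the op_model_ppro.c rewrite above replaces the fixed 32-bit
counter macros with a runtime counter_width read from CPUID leaf 0xA,
because counters are seeded with the negative of the sample count and
overflow is detected by watching the top implemented bit. A small
sketch of that up-counting convention (illustrative only, not code
from the diff; both helpers are hypothetical):

	/* seed: two's complement, so the counter reaches zero and raises
	 * the overflow NMI after exactly 'count' events; wrmsrl()
	 * sign-extends the value across the full counter width */
	static u64 seed_example(u64 count)
	{
		return (u64)(-(s64)count);
	}

	/* overflowed: once the counter wraps past zero, the top
	 * implemented bit (counter_width - 1) reads back as 0 */
	static int overflowed_example(u64 value, int counter_width)
	{
		return !(value & (1ULL << (counter_width - 1)));
	}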

arch/x86/oprofile/op_x86_model.h

@@ -34,8 +34,8 @@ struct pt_regs;
 struct op_x86_model_spec {
 	int (*init)(struct oprofile_operations *ops);
 	void (*exit)(void);
-	unsigned int const num_counters;
-	unsigned int const num_controls;
+	unsigned int num_counters;
+	unsigned int num_controls;
 	void (*fill_in_addresses)(struct op_msrs * const msrs);
 	void (*setup_ctrs)(struct op_msrs const * const msrs);
 	int (*check_ctrs)(struct pt_regs * const regs,
@@ -45,9 +45,12 @@ struct op_x86_model_spec {
 	void (*shutdown)(struct op_msrs const * const msrs);
 };
 
-extern struct op_x86_model_spec const op_ppro_spec;
+extern struct op_x86_model_spec op_ppro_spec;
 extern struct op_x86_model_spec const op_p4_spec;
 extern struct op_x86_model_spec const op_p4_ht2_spec;
 extern struct op_x86_model_spec const op_amd_spec;
+extern struct op_x86_model_spec op_arch_perfmon_spec;
+
+extern void arch_perfmon_setup_counters(void);
 
 #endif /* OP_X86_MODEL_H */
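
Note: dropping const from num_counters/num_controls is what lets
arch_perfmon_setup_counters() patch the counter counts into
op_ppro_spec and op_arch_perfmon_spec at probe time. The spec is a
plain ops table that the NMI driver dispatches through; a hedged
sketch of that dispatch (setup_example is a hypothetical caller, not
code from this diff):

	static void setup_example(struct op_x86_model_spec *spec,
				  struct op_msrs *msrs)
	{
		spec->fill_in_addresses(msrs);	/* reserve and record MSRs */
		spec->setup_ctrs(msrs);		/* program the counters */
	}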

drivers/oprofile/buffer_sync.c

@@ -41,7 +41,6 @@ static cpumask_t marked_cpus = CPU_MASK_NONE;
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
-
 /* Take ownership of the task struct and place it on the
  * list for processing. Only after two full buffer syncs
  * does the task eventually get freed, because by then
@@ -341,7 +340,7 @@ static void add_trace_begin(void)
  * Add IBS fetch and op entries to event buffer
  */
 static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
-			  int in_kernel, struct mm_struct *mm)
+			  struct mm_struct *mm)
 {
 	unsigned long rip;
 	int i, count;
@@ -565,9 +564,11 @@ void sync_buffer(int cpu)
 	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
-	unsigned int i;
 	sync_buffer_state state = sb_buffer_start;
+#ifndef CONFIG_OPROFILE_IBS
+	unsigned int i;
 	unsigned long available;
+#endif
 
 	mutex_lock(&buffer_mutex);
@@ -575,9 +576,13 @@ void sync_buffer(int cpu)
 
 	/* Remember, only we can modify tail_pos */
 
+#ifndef CONFIG_OPROFILE_IBS
 	available = get_slots(cpu_buf);
 
 	for (i = 0; i < available; ++i) {
+#else
+	while (get_slots(cpu_buf)) {
+#endif
 		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
 
 		if (is_code(s->eip)) {
@@ -593,12 +598,10 @@ void sync_buffer(int cpu)
 #ifdef CONFIG_OPROFILE_IBS
 			} else if (s->event == IBS_FETCH_BEGIN) {
 				state = sb_bt_start;
-				add_ibs_begin(cpu_buf,
-					IBS_FETCH_CODE, in_kernel, mm);
+				add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm);
 			} else if (s->event == IBS_OP_BEGIN) {
 				state = sb_bt_start;
-				add_ibs_begin(cpu_buf,
-					IBS_OP_CODE, in_kernel, mm);
+				add_ibs_begin(cpu_buf, IBS_OP_CODE, mm);
 #endif
 			} else {
 				struct mm_struct *oldmm = mm;
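
Note: an IBS event occupies several op_sample slots (see
MAX_IBS_SAMPLE_SIZE and the chained add_sample() calls in
cpu_buffer.c below), which appears to be why the "Fix buffer
synchronization for IBS" change drains with while (get_slots(cpu_buf))
rather than a slot count snapshotted once before the loop. A generic
sketch of the two drain styles (ring_example and drain_example are
hypothetical; locking between producer and consumer is omitted):

	/* hypothetical ring: 'used' is maintained by the producer */
	struct ring_example { unsigned int tail, size, used; };

	static void drain_example(struct ring_example *r)
	{
		/* re-checking the fill level each pass consumes entries
		 * appended while draining, so a multi-slot record is not
		 * cut off at a stale, pre-computed count */
		while (r->used) {
			r->tail = (r->tail + 1) % r->size;
			r->used--;
		}
	}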

drivers/oprofile/cpu_buffer.c

@@ -39,7 +39,7 @@ void free_cpu_buffers(void)
 {
 	int i;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		vfree(per_cpu(cpu_buffer, i).buffer);
 		per_cpu(cpu_buffer, i).buffer = NULL;
 	}
@@ -64,7 +64,7 @@ int alloc_cpu_buffers(void)
 
 	unsigned long buffer_size = fs_cpu_buffer_size;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
 		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
@@ -271,20 +271,22 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 #ifdef CONFIG_OPROFILE_IBS
 
 #define MAX_IBS_SAMPLE_SIZE 14
-static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
-	unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
+
+void oprofile_add_ibs_sample(struct pt_regs *const regs,
+			     unsigned int *const ibs_sample, int ibs_code)
 {
+	int is_kernel = !user_mode(regs);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 	struct task_struct *task;
 
 	cpu_buf->sample_received++;
 
 	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
+		/* we can't backtrace since we lost the source of this event */
 		cpu_buf->sample_lost_overflow++;
-		return 0;
+		return;
 	}
 
-	is_kernel = !!is_kernel;
-
 	/* notice a switch from user->kernel or vice versa */
 	if (cpu_buf->last_is_kernel != is_kernel) {
 		cpu_buf->last_is_kernel = is_kernel;
@@ -294,7 +296,6 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
 	/* notice a task switch */
 	if (!is_kernel) {
 		task = current;
-
 		if (cpu_buf->last_task != task) {
 			cpu_buf->last_task = task;
 			add_code(cpu_buf, (unsigned long)task);
@@ -302,36 +303,17 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
 	}
 
 	add_code(cpu_buf, ibs_code);
-	add_sample(cpu_buf, ibs[0], ibs[1]);
-	add_sample(cpu_buf, ibs[2], ibs[3]);
-	add_sample(cpu_buf, ibs[4], ibs[5]);
+	add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
+	add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
+	add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
 
 	if (ibs_code == IBS_OP_BEGIN) {
-		add_sample(cpu_buf, ibs[6], ibs[7]);
-		add_sample(cpu_buf, ibs[8], ibs[9]);
-		add_sample(cpu_buf, ibs[10], ibs[11]);
+		add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
+		add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
+		add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
 	}
 
-	return 1;
-}
-
-void oprofile_add_ibs_sample(struct pt_regs *const regs,
-			     unsigned int * const ibs_sample, u8 code)
-{
-	int is_kernel = !user_mode(regs);
-	unsigned long pc = profile_pc(regs);
-
-	struct oprofile_cpu_buffer *cpu_buf =
-			 &per_cpu(cpu_buffer, smp_processor_id());
-
-	if (!backtrace_depth) {
-		log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
-		return;
-	}
-
-	/* if log_sample() fails we can't backtrace since we lost the source
-	 * of this event */
-	if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
+	if (backtrace_depth)
 		oprofile_ops.backtrace(regs, backtrace_depth);
 }
 
@@ -381,6 +363,11 @@ static void wq_sync_buffer(struct work_struct *work)
 	if (b->cpu != smp_processor_id()) {
 		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
 		       smp_processor_id(), b->cpu);
+
+		if (!cpu_online(b->cpu)) {
+			cancel_delayed_work(&b->work);
+			return;
+		}
 	}
 	sync_buffer(b->cpu);
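
Note: switching to for_each_possible_cpu() means buffers exist for
CPUs that come online after start, and free_cpu_buffers() can reach
the buffers of CPUs that are offline at teardown; the wq_sync_buffer()
hunk covers the remaining case of a self-rescheduling per-CPU work
item whose CPU has gone away. A condensed sketch of that pattern
(wq_example is a hypothetical name and the requeue interval is an
assumption, not taken from this diff):

	static void wq_example(struct work_struct *work)
	{
		struct oprofile_cpu_buffer *b =
			container_of(work, struct oprofile_cpu_buffer, work.work);

		if (!cpu_online(b->cpu)) {
			/* our CPU is gone: stop requeueing ourselves */
			cancel_delayed_work(&b->work);
			return;
		}
		sync_buffer(b->cpu);
		/* otherwise keep the periodic per-CPU sync going */
		schedule_delayed_work_on(b->cpu, &b->work, HZ / 10);
	}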