Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile: (31 commits)
  powerpc/oprofile: fix whitespaces in op_model_cell.c
  powerpc/oprofile: IBM CELL: add SPU event profiling support
  powerpc/oprofile: fix cell/pr_util.h
  powerpc/oprofile: IBM CELL: cleanup and restructuring
  oprofile: make new cpu buffer functions part of the api
  oprofile: remove #ifdef CONFIG_OPROFILE_IBS in non-ibs code
  ring_buffer: fix ring_buffer_event_length()
  oprofile: use new data sample format for ibs
  oprofile: add op_cpu_buffer_get_data()
  oprofile: add op_cpu_buffer_add_data()
  oprofile: rework implementation of cpu buffer events
  oprofile: modify op_cpu_buffer_read_entry()
  oprofile: add op_cpu_buffer_write_reserve()
  oprofile: rename variables in add_ibs_begin()
  oprofile: rename add_sample() in cpu_buffer.c
  oprofile: rename variable ibs_allowed to has_ibs in op_model_amd.c
  oprofile: making add_sample_entry() inline
  oprofile: remove backtrace code for ibs
  oprofile: remove unused ibs macro
  oprofile: remove unused components in struct oprofile_cpu_buffer
  ...
Commit 4ce5f24193
@@ -37,9 +37,11 @@
 #define CBE_PM_STOP_AT_MAX 0x40000000
 #define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3)
 #define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28)
+#define CBE_PM_TRACE_BUF_OVFLW(bit) (((bit) & 0x1) << 17)
 #define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18)
 #define CBE_PM_FREEZE_ALL_CTRS 0x00100000
 #define CBE_PM_ENABLE_EXT_TRACE 0x00008000
+#define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9)

 /* Macros for the trace_address register. */
 #define CBE_PM_TRACE_BUF_FULL 0x00000800
@@ -32,6 +32,12 @@ struct op_system_config {
 unsigned long mmcr0;
 unsigned long mmcr1;
 unsigned long mmcra;
+#ifdef CONFIG_OPROFILE_CELL
+/* Register for oprofile user tool to check cell kernel profiling
+* suport.
+*/
+unsigned long cell_support;
+#endif
 #endif
 unsigned long enable_kernel;
 unsigned long enable_user;
@@ -30,6 +30,10 @@
 extern struct delayed_work spu_work;
 extern int spu_prof_running;

+#define TRACE_ARRAY_SIZE 1024
+
+extern spinlock_t oprof_spu_smpl_arry_lck;
+
 struct spu_overlay_info { /* map of sections within an SPU overlay */
 unsigned int vma; /* SPU virtual memory address from elf */
 unsigned int size; /* size of section from elf */
@@ -89,10 +93,11 @@ void vma_map_free(struct vma_to_fileoffset_map *map);
 * Entry point for SPU profiling.
 * cycles_reset is the SPU_CYCLES count value specified by the user.
 */
-int start_spu_profiling(unsigned int cycles_reset);
-
-void stop_spu_profiling(void);
+int start_spu_profiling_cycles(unsigned int cycles_reset);
+void start_spu_profiling_events(void);

+void stop_spu_profiling_cycles(void);
+void stop_spu_profiling_events(void);

 /* add the necessary profiling hooks */
 int spu_sync_start(void);
@@ -18,11 +18,21 @@
 #include <asm/cell-pmu.h>
 #include "pr_util.h"

-#define TRACE_ARRAY_SIZE 1024
 #define SCALE_SHIFT 14

 static u32 *samples;

+/* spu_prof_running is a flag used to indicate if spu profiling is enabled
+* or not. It is set by the routines start_spu_profiling_cycles() and
+* start_spu_profiling_events(). The flag is cleared by the routines
+* stop_spu_profiling_cycles() and stop_spu_profiling_events(). These
+* routines are called via global_start() and global_stop() which are called in
+* op_powerpc_start() and op_powerpc_stop(). These routines are called once
+* per system as a result of the user starting/stopping oprofile. Hence, only
+* one CPU per user at a time will be changing the value of spu_prof_running.
+* In general, OProfile does not protect against multiple users trying to run
+* OProfile at a time.
+*/
 int spu_prof_running;
 static unsigned int profiling_interval;

@@ -31,8 +41,8 @@ static unsigned int profiling_interval;

 #define SPU_PC_MASK 0xFFFF

-static DEFINE_SPINLOCK(sample_array_lock);
-unsigned long sample_array_lock_flags;
+DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
+unsigned long oprof_spu_smpl_arry_lck_flags;

 void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
 {
@@ -145,13 +155,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 * sample array must be loaded and then processed for a given
 * cpu. The sample array is not per cpu.
 */
-spin_lock_irqsave(&sample_array_lock,
-sample_array_lock_flags);
+spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+oprof_spu_smpl_arry_lck_flags);
 num_samples = cell_spu_pc_collection(cpu);

 if (num_samples == 0) {
-spin_unlock_irqrestore(&sample_array_lock,
-sample_array_lock_flags);
+spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+oprof_spu_smpl_arry_lck_flags);
 continue;
 }

@@ -162,8 +172,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 num_samples);
 }

-spin_unlock_irqrestore(&sample_array_lock,
-sample_array_lock_flags);
+spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+oprof_spu_smpl_arry_lck_flags);

 }
 smp_wmb(); /* insure spu event buffer updates are written */
@@ -182,13 +192,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)

 static struct hrtimer timer;
 /*
-* Entry point for SPU profiling.
+* Entry point for SPU cycle profiling.
 * NOTE: SPU profiling is done system-wide, not per-CPU.
 *
 * cycles_reset is the count value specified by the user when
 * setting up OProfile to count SPU_CYCLES.
 */
-int start_spu_profiling(unsigned int cycles_reset)
+int start_spu_profiling_cycles(unsigned int cycles_reset)
 {
 ktime_t kt;

@@ -212,10 +222,30 @@ int start_spu_profiling(unsigned int cycles_reset)
 return 0;
 }

-void stop_spu_profiling(void)
+/*
+* Entry point for SPU event profiling.
+* NOTE: SPU profiling is done system-wide, not per-CPU.
+*
+* cycles_reset is the count value specified by the user when
+* setting up OProfile to count SPU_CYCLES.
+*/
+void start_spu_profiling_events(void)
+{
+spu_prof_running = 1;
+schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
+
+return;
+}
+
+void stop_spu_profiling_cycles(void)
 {
 spu_prof_running = 0;
 hrtimer_cancel(&timer);
 kfree(samples);
-pr_debug("SPU_PROF: stop_spu_profiling issued\n");
+pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
+}
+
+void stop_spu_profiling_events(void)
+{
+spu_prof_running = 0;
 }
@@ -132,6 +132,28 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
+#ifdef CONFIG_OPROFILE_CELL
+/* create a file the user tool can check to see what level of profiling
+* support exits with this kernel. Initialize bit mask to indicate
+* what support the kernel has:
+* bit 0 - Supports SPU event profiling in addition to PPU
+* event and cycles; and SPU cycle profiling
+* bits 1-31 - Currently unused.
+*
+* If the file does not exist, then the kernel only supports SPU
+* cycle profiling, PPU event and cycle profiling.
+*/
+oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support);
+sys.cell_support = 0x1; /* Note, the user OProfile tool must check
+* that this bit is set before attempting to
+* user SPU event profiling. Older kernels
+* will not have this file, hence the user
+* tool is not allowed to do SPU event
+* profiling on older kernels. Older kernels
+* will accept SPU events but collected data
+* is garbage.
+*/
+#endif
 #endif

 for (i = 0; i < model->num_counters; ++i) {
@@ -40,14 +40,15 @@
 #include "../platforms/cell/interrupt.h"
 #include "cell/pr_util.h"

-static void cell_global_stop_spu(void);
+#define PPU_PROFILING 0
+#define SPU_PROFILING_CYCLES 1
+#define SPU_PROFILING_EVENTS 2

-/*
-* spu_cycle_reset is the number of cycles between samples.
-* This variable is used for SPU profiling and should ONLY be set
-* at the beginning of cell_reg_setup; otherwise, it's read-only.
-*/
-static unsigned int spu_cycle_reset;
+#define SPU_EVENT_NUM_START 4100
+#define SPU_EVENT_NUM_STOP 4399
+#define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */
+#define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */
+#define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */

 #define NUM_SPUS_PER_NODE 8
 #define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */
@@ -66,6 +67,21 @@ static unsigned int spu_cycle_reset;

 #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */

+/* Minumum HW interval timer setting to send value to trace buffer is 10 cycle.
+* To configure counter to send value every N cycles set counter to
+* 2^32 - 1 - N.
+*/
+#define NUM_INTERVAL_CYC 0xFFFFFFFF - 10
+
+/*
+* spu_cycle_reset is the number of cycles between samples.
+* This variable is used for SPU profiling and should ONLY be set
+* at the beginning of cell_reg_setup; otherwise, it's read-only.
+*/
+static unsigned int spu_cycle_reset;
+static unsigned int profiling_mode;
+static int spu_evnt_phys_spu_indx;
+
 struct pmc_cntrl_data {
 unsigned long vcntr;
 unsigned long evnts;
@@ -105,6 +121,8 @@ struct pm_cntrl {
 u16 trace_mode;
 u16 freeze;
 u16 count_mode;
+u16 spu_addr_trace;
+u8 trace_buf_ovflw;
 };

 static struct {
@@ -122,7 +140,7 @@ static struct {
 #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)

 static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
-
+static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
 static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];

 /*
@@ -152,6 +170,7 @@ static u32 hdw_thread;

 static u32 virt_cntr_inter_mask;
 static struct timer_list timer_virt_cntr;
+static struct timer_list timer_spu_event_swap;

 /*
 * pm_signal needs to be global since it is initialized in
|
@ -165,7 +184,7 @@ static int spu_rtas_token; /* token for SPU cycle profiling */
|
||||||
static u32 reset_value[NR_PHYS_CTRS];
|
static u32 reset_value[NR_PHYS_CTRS];
|
||||||
static int num_counters;
|
static int num_counters;
|
||||||
static int oprofile_running;
|
static int oprofile_running;
|
||||||
static DEFINE_SPINLOCK(virt_cntr_lock);
|
static DEFINE_SPINLOCK(cntr_lock);
|
||||||
|
|
||||||
static u32 ctr_enabled;
|
static u32 ctr_enabled;
|
||||||
|
|
||||||
|
@@ -336,13 +355,13 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
 for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
 if (bus_word & (1 << i)) {
 pm_regs.debug_bus_control |=
 (bus_type << (30 - (2 * i)));

 for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
 if (input_bus[j] == 0xff) {
 input_bus[j] = i;
 pm_regs.group_control |=
 (i << (30 - (2 * j)));

 break;
 }
@@ -367,12 +386,16 @@ static void write_pm_cntrl(int cpu)
 if (pm_regs.pm_cntrl.stop_at_max == 1)
 val |= CBE_PM_STOP_AT_MAX;

-if (pm_regs.pm_cntrl.trace_mode == 1)
+if (pm_regs.pm_cntrl.trace_mode != 0)
 val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);

+if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
+val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
 if (pm_regs.pm_cntrl.freeze == 1)
 val |= CBE_PM_FREEZE_ALL_CTRS;

+val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
+
 /*
 * Routine set_count_mode must be called previously to set
 * the count mode based on the user selection of user and kernel.
@@ -441,7 +464,7 @@ static void cell_virtual_cntr(unsigned long data)
 * not both playing with the counters on the same node.
 */

-spin_lock_irqsave(&virt_cntr_lock, flags);
+spin_lock_irqsave(&cntr_lock, flags);

 prev_hdw_thread = hdw_thread;

@@ -480,7 +503,7 @@ static void cell_virtual_cntr(unsigned long data)
 cbe_disable_pm_interrupts(cpu);
 for (i = 0; i < num_counters; i++) {
 per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
 = cbe_read_ctr(cpu, i);

 if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
 == 0xFFFFFFFF)
@@ -527,7 +550,7 @@ static void cell_virtual_cntr(unsigned long data)
 cbe_enable_pm(cpu);
 }

-spin_unlock_irqrestore(&virt_cntr_lock, flags);
+spin_unlock_irqrestore(&cntr_lock, flags);

 mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
 }
@@ -541,38 +564,146 @@ static void start_virt_cntrs(void)
 add_timer(&timer_virt_cntr);
 }

-/* This function is called once for all cpus combined */
+static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
-static int cell_reg_setup(struct op_counter_config *ctr,
 struct op_system_config *sys, int num_ctrs)
 {
-int i, j, cpu;
+spu_cycle_reset = ctr[0].count;
-spu_cycle_reset = 0;

-if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
-spu_cycle_reset = ctr[0].count;
-
-/*
-* Each node will need to make the rtas call to start
-* and stop SPU profiling. Get the token once and store it.
-*/
-spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
-
-if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
-printk(KERN_ERR
-"%s: rtas token ibm,cbe-spu-perftools unknown\n",
-__func__);
-return -EIO;
-}
-}
-
-pm_rtas_token = rtas_token("ibm,cbe-perftools");

 /*
-* For all events excetp PPU CYCLEs, each node will need to make
+* Each node will need to make the rtas call to start
+* and stop SPU profiling. Get the token once and store it.
+*/
+spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
+
+if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
+printk(KERN_ERR
+"%s: rtas token ibm,cbe-spu-perftools unknown\n",
+__func__);
+return -EIO;
+}
+return 0;
+}
+
+/* Unfortunately, the hardware will only support event profiling
+* on one SPU per node at a time. Therefore, we must time slice
+* the profiling across all SPUs in the node. Note, we do this
+* in parallel for each node. The following routine is called
+* periodically based on kernel timer to switch which SPU is
+* being monitored in a round robbin fashion.
+*/
+static void spu_evnt_swap(unsigned long data)
+{
+int node;
+int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
+unsigned long flags;
+int cpu;
+int ret;
+u32 interrupt_mask;
+
+/* enable interrupts on cntr 0 */
+interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);
+
+hdw_thread = 0;
+
+/* Make sure spu event interrupt handler and spu event swap
+* don't access the counters simultaneously.
+*/
+spin_lock_irqsave(&cntr_lock, flags);
+
+cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;
+
+if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
+spu_evnt_phys_spu_indx = 0;
+
+pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
+pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
+pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
+
+/* switch the SPU being profiled on each node */
+for_each_online_cpu(cpu) {
+if (cbe_get_hw_thread_id(cpu))
+continue;
+
+node = cbe_cpu_to_node(cpu);
+cur_phys_spu = (node * NUM_SPUS_PER_NODE)
++ cur_spu_evnt_phys_spu_indx;
+nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
++ spu_evnt_phys_spu_indx;
+
+/*
+* stop counters, save counter values, restore counts
+* for previous physical SPU
+*/
+cbe_disable_pm(cpu);
+cbe_disable_pm_interrupts(cpu);
+
+spu_pm_cnt[cur_phys_spu]
+= cbe_read_ctr(cpu, 0);
+
+/* restore previous count for the next spu to sample */
+/* NOTE, hardware issue, counter will not start if the
+* counter value is at max (0xFFFFFFFF).
+*/
+if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
+cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
+else
+cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
+
+pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+/* setup the debug bus measure the one event and
+* the two events to route the next SPU's PC on
+* the debug bus
+*/
+ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
+if (ret)
+printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
+"SPU event swap\n", __func__);
+
+/* clear the trace buffer, don't want to take PC for
+* previous SPU*/
+cbe_write_pm(cpu, trace_address, 0);
+
+enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
+
+/* Enable interrupts on the CPU thread that is starting */
+cbe_enable_pm_interrupts(cpu, hdw_thread,
+interrupt_mask);
+cbe_enable_pm(cpu);
+}
+
+spin_unlock_irqrestore(&cntr_lock, flags);
+
+/* swap approximately every 0.1 seconds */
+mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
+}
+
+static void start_spu_event_swap(void)
+{
+init_timer(&timer_spu_event_swap);
+timer_spu_event_swap.function = spu_evnt_swap;
+timer_spu_event_swap.data = 0UL;
+timer_spu_event_swap.expires = jiffies + HZ / 25;
+add_timer(&timer_spu_event_swap);
+}
+
+static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
+struct op_system_config *sys, int num_ctrs)
+{
+int i;
+
+/* routine is called once for all nodes */
+
+spu_evnt_phys_spu_indx = 0;
+/*
+* For all events except PPU CYCLEs, each node will need to make
 * the rtas cbe-perftools call to setup and reset the debug bus.
 * Make the token lookup call once and store it in the global
 * variable pm_rtas_token.
 */
+pm_rtas_token = rtas_token("ibm,cbe-perftools");
+
 if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
 printk(KERN_ERR
 "%s: rtas token ibm,cbe-perftools unknown\n",
@@ -580,6 +711,58 @@ static int cell_reg_setup(struct op_counter_config *ctr,
 return -EIO;
 }

+/* setup the pm_control register settings,
+* settings will be written per node by the
+* cell_cpu_setup() function.
+*/
+pm_regs.pm_cntrl.trace_buf_ovflw = 1;
+
+/* Use the occurrence trace mode to have SPU PC saved
+* to the trace buffer. Occurrence data in trace buffer
+* is not used. Bit 2 must be set to store SPU addresses.
+*/
+pm_regs.pm_cntrl.trace_mode = 2;
+
+pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus
+event 2 & 3 */
+
+/* setup the debug bus event array with the SPU PC routing events.
+* Note, pm_signal[0] will be filled in by set_pm_event() call below.
+*/
+pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
+pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
+pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
+pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
+
+pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
+pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
+pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
+pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
+
+/* Set the user selected spu event to profile on,
+* note, only one SPU profiling event is supported
+*/
+num_counters = 1; /* Only support one SPU event at a time */
+set_pm_event(0, ctr[0].event, ctr[0].unit_mask);
+
+reset_value[0] = 0xFFFFFFFF - ctr[0].count;
+
+/* global, used by cell_cpu_setup */
+ctr_enabled |= 1;
+
+/* Initialize the count for each SPU to the reset value */
+for (i=0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
+spu_pm_cnt[i] = reset_value[0];
+
+return 0;
+}
+
+static int cell_reg_setup_ppu(struct op_counter_config *ctr,
+struct op_system_config *sys, int num_ctrs)
+{
+/* routine is called once for all nodes */
+int i, j, cpu;
+
 num_counters = num_ctrs;

 if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
|
||||||
__func__);
|
__func__);
|
||||||
return -EIO;
|
return -EIO;
|
||||||
}
|
}
|
||||||
pm_regs.group_control = 0;
|
|
||||||
pm_regs.debug_bus_control = 0;
|
|
||||||
|
|
||||||
/* setup the pm_control register */
|
|
||||||
memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
|
|
||||||
pm_regs.pm_cntrl.stop_at_max = 1;
|
|
||||||
pm_regs.pm_cntrl.trace_mode = 0;
|
|
||||||
pm_regs.pm_cntrl.freeze = 1;
|
|
||||||
|
|
||||||
set_count_mode(sys->enable_kernel, sys->enable_user);
|
set_count_mode(sys->enable_kernel, sys->enable_user);
|
||||||
|
|
||||||
|
@@ -665,6 +840,63 @@ static int cell_reg_setup(struct op_counter_config *ctr,
 }

+
+/* This function is called once for all cpus combined */
+static int cell_reg_setup(struct op_counter_config *ctr,
+struct op_system_config *sys, int num_ctrs)
+{
+int ret=0;
+spu_cycle_reset = 0;
+
+/* initialize the spu_arr_trace value, will be reset if
+* doing spu event profiling.
+*/
+pm_regs.group_control = 0;
+pm_regs.debug_bus_control = 0;
+pm_regs.pm_cntrl.stop_at_max = 1;
+pm_regs.pm_cntrl.trace_mode = 0;
+pm_regs.pm_cntrl.freeze = 1;
+pm_regs.pm_cntrl.trace_buf_ovflw = 0;
+pm_regs.pm_cntrl.spu_addr_trace = 0;
+
+/*
+* For all events except PPU CYCLEs, each node will need to make
+* the rtas cbe-perftools call to setup and reset the debug bus.
+* Make the token lookup call once and store it in the global
+* variable pm_rtas_token.
+*/
+pm_rtas_token = rtas_token("ibm,cbe-perftools");
+
+if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
+printk(KERN_ERR
+"%s: rtas token ibm,cbe-perftools unknown\n",
+__func__);
+return -EIO;
+}
+
+if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
+profiling_mode = SPU_PROFILING_CYCLES;
+ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
+} else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
+(ctr[0].event <= SPU_EVENT_NUM_STOP)) {
+profiling_mode = SPU_PROFILING_EVENTS;
+spu_cycle_reset = ctr[0].count;
+
+/* for SPU event profiling, need to setup the
+* pm_signal array with the events to route the
+* SPU PC before making the FW call. Note, only
+* one SPU event for profiling can be specified
+* at a time.
+*/
+cell_reg_setup_spu_events(ctr, sys, num_ctrs);
+} else {
+profiling_mode = PPU_PROFILING;
+ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
+}
+
+return ret;
+}
+

 /* This function is called once for each cpu */
 static int cell_cpu_setup(struct op_counter_config *cntr)
@@ -672,8 +904,13 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
 u32 cpu = smp_processor_id();
 u32 num_enabled = 0;
 int i;
+int ret;

-if (spu_cycle_reset)
+/* Cycle based SPU profiling does not use the performance
+* counters. The trace array is configured to collect
+* the data.
+*/
+if (profiling_mode == SPU_PROFILING_CYCLES)
 return 0;

 /* There is one performance monitor per processor chip (i.e. node),
@@ -686,7 +923,6 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
 cbe_disable_pm(cpu);
 cbe_disable_pm_interrupts(cpu);

-cbe_write_pm(cpu, pm_interval, 0);
 cbe_write_pm(cpu, pm_start_stop, 0);
 cbe_write_pm(cpu, group_control, pm_regs.group_control);
 cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
@@ -703,7 +939,20 @@ static int cell_cpu_setup(struct op_counter_config *cntr)
 * The pm_rtas_activate_signals will return -EIO if the FW
 * call failed.
 */
-return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
+if (profiling_mode == SPU_PROFILING_EVENTS) {
+/* For SPU event profiling also need to setup the
+* pm interval timer
+*/
+ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
+num_enabled+2);
+/* store PC from debug bus to Trace buffer as often
+* as possible (every 10 cycles)
+*/
+cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+return ret;
+} else
+return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
+num_enabled);
 }

 #define ENTRIES 303
@@ -885,7 +1134,122 @@ static struct notifier_block cpu_freq_notifier_block = {
 };
 #endif

-static int cell_global_start_spu(struct op_counter_config *ctr)
+/*
+* Note the generic OProfile stop calls do not support returning
+* an error on stop. Hence, will not return an error if the FW
+* calls fail on stop. Failure to reset the debug bus is not an issue.
+* Failure to disable the SPU profiling is not an issue. The FW calls
+* to enable the performance counters and debug bus will work even if
+* the hardware was not cleanly reset.
+*/
+static void cell_global_stop_spu_cycles(void)
+{
+int subfunc, rtn_value;
+unsigned int lfsr_value;
+int cpu;
+
+oprofile_running = 0;
+smp_wmb();
+
+#ifdef CONFIG_CPU_FREQ
+cpufreq_unregister_notifier(&cpu_freq_notifier_block,
+CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+for_each_online_cpu(cpu) {
+if (cbe_get_hw_thread_id(cpu))
+continue;
+
+subfunc = 3; /*
+* 2 - activate SPU tracing,
+* 3 - deactivate
+*/
+lfsr_value = 0x8f100000;
+
+rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
+subfunc, cbe_cpu_to_node(cpu),
+lfsr_value);
+
+if (unlikely(rtn_value != 0)) {
+printk(KERN_ERR
+"%s: rtas call ibm,cbe-spu-perftools " \
+"failed, return = %d\n",
+__func__, rtn_value);
+}
+
+/* Deactivate the signals */
+pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+}
+
+stop_spu_profiling_cycles();
+}
+
+static void cell_global_stop_spu_events(void)
+{
+int cpu;
+oprofile_running = 0;
+
+stop_spu_profiling_events();
+smp_wmb();
+
+for_each_online_cpu(cpu) {
+if (cbe_get_hw_thread_id(cpu))
+continue;
+
+cbe_sync_irq(cbe_cpu_to_node(cpu));
+/* Stop the counters */
+cbe_disable_pm(cpu);
+cbe_write_pm07_control(cpu, 0, 0);
+
+/* Deactivate the signals */
+pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+/* Deactivate interrupts */
+cbe_disable_pm_interrupts(cpu);
+}
+del_timer_sync(&timer_spu_event_swap);
+}
+
+static void cell_global_stop_ppu(void)
+{
+int cpu;
+
+/*
+* This routine will be called once for the system.
+* There is one performance monitor per node, so we
+* only need to perform this function once per node.
+*/
+del_timer_sync(&timer_virt_cntr);
+oprofile_running = 0;
+smp_wmb();
+
+for_each_online_cpu(cpu) {
+if (cbe_get_hw_thread_id(cpu))
+continue;
+
+cbe_sync_irq(cbe_cpu_to_node(cpu));
+/* Stop the counters */
+cbe_disable_pm(cpu);
+
+/* Deactivate the signals */
+pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+/* Deactivate interrupts */
+cbe_disable_pm_interrupts(cpu);
+}
+}
+
+static void cell_global_stop(void)
+{
+if (profiling_mode == PPU_PROFILING)
+cell_global_stop_ppu();
+else if (profiling_mode == SPU_PROFILING_EVENTS)
+cell_global_stop_spu_events();
+else
+cell_global_stop_spu_cycles();
+}
+
+static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
 {
 int subfunc;
 unsigned int lfsr_value;
@@ -951,18 +1315,18 @@ static int cell_global_start_spu(struct op_counter_config *ctr)

 /* start profiling */
 ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
 cbe_cpu_to_node(cpu), lfsr_value);

 if (unlikely(ret != 0)) {
 printk(KERN_ERR
-"%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
-__func__, ret);
+"%s: rtas call ibm,cbe-spu-perftools failed, " \
+"return = %d\n", __func__, ret);
 rtas_error = -EIO;
 goto out;
 }
 }

-rtas_error = start_spu_profiling(spu_cycle_reset);
+rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
 if (rtas_error)
 goto out_stop;

@@ -970,11 +1334,74 @@ static int cell_global_start_spu(struct op_counter_config *ctr)
 return 0;

 out_stop:
-cell_global_stop_spu(); /* clean up the PMU/debug bus */
+cell_global_stop_spu_cycles(); /* clean up the PMU/debug bus */
 out:
 return rtas_error;
 }

+static int cell_global_start_spu_events(struct op_counter_config *ctr)
+{
+int cpu;
+u32 interrupt_mask = 0;
+int rtn = 0;
+
+hdw_thread = 0;
+
+/* spu event profiling, uses the performance counters to generate
+* an interrupt. The hardware is setup to store the SPU program
+* counter into the trace array. The occurrence mode is used to
+* enable storing data to the trace buffer. The bits are set
+* to send/store the SPU address in the trace buffer. The debug
+* bus must be setup to route the SPU program counter onto the
+* debug bus. The occurrence data in the trace buffer is not used.
+*/
+
+/* This routine gets called once for the system.
+* There is one performance monitor per node, so we
+* only need to perform this function once per node.
+*/
+
+for_each_online_cpu(cpu) {
+if (cbe_get_hw_thread_id(cpu))
+continue;
+
+/*
+* Setup SPU event-based profiling.
+* Set perf_mon_control bit 0 to a zero before
+* enabling spu collection hardware.
+*
+* Only support one SPU event on one SPU per node.
+*/
+if (ctr_enabled & 1) {
+cbe_write_ctr(cpu, 0, reset_value[0]);
+enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
+interrupt_mask |=
+CBE_PM_CTR_OVERFLOW_INTR(0);
+} else {
+/* Disable counter */
+cbe_write_pm07_control(cpu, 0, 0);
+}
+
+cbe_get_and_clear_pm_interrupts(cpu);
+cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
+cbe_enable_pm(cpu);
+
+/* clear the trace buffer */
+cbe_write_pm(cpu, trace_address, 0);
+}
+
+/* Start the timer to time slice collecting the event profile
+* on each of the SPUs. Note, can collect profile on one SPU
+* per node at a time.
+*/
+start_spu_event_swap();
+start_spu_profiling_events();
+oprofile_running = 1;
+smp_wmb();
+
+return rtn;
+}
+
 static int cell_global_start_ppu(struct op_counter_config *ctr)
 {
 u32 cpu, i;
@@ -994,8 +1421,7 @@ static int cell_global_start_ppu(struct op_counter_config *ctr)
 if (ctr_enabled & (1 << i)) {
 cbe_write_ctr(cpu, i, reset_value[i]);
 enable_ctr(cpu, i, pm_regs.pm07_cntrl);
-interrupt_mask |=
-CBE_PM_CTR_OVERFLOW_INTR(i);
+interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
 } else {
 /* Disable counter */
 cbe_write_pm07_control(cpu, i, 0);
@@ -1024,99 +1450,162 @@ static int cell_global_start_ppu(struct op_counter_config *ctr)

 static int cell_global_start(struct op_counter_config *ctr)
 {
-if (spu_cycle_reset)
-return cell_global_start_spu(ctr);
+if (profiling_mode == SPU_PROFILING_CYCLES)
+return cell_global_start_spu_cycles(ctr);
+else if (profiling_mode == SPU_PROFILING_EVENTS)
+return cell_global_start_spu_events(ctr);
 else
 return cell_global_start_ppu(ctr);
 }

-/*
-* Note the generic OProfile stop calls do not support returning
+/* The SPU interrupt handler
-* an error on stop. Hence, will not return an error if the FW
+*
-* calls fail on stop. Failure to reset the debug bus is not an issue.
+* SPU event profiling works as follows:
-* Failure to disable the SPU profiling is not an issue. The FW calls
+* The pm_signal[0] holds the one SPU event to be measured. It is routed on
-* to enable the performance counters and debug bus will work even if
+* the debug bus using word 0 or 1. The value of pm_signal[1] and
-* the hardware was not cleanly reset.
+* pm_signal[2] contain the necessary events to route the SPU program
+* counter for the selected SPU onto the debug bus using words 2 and 3.
+* The pm_interval register is setup to write the SPU PC value into the
+* trace buffer at the maximum rate possible. The trace buffer is configured
+* to store the PCs, wrapping when it is full. The performance counter is
+* intialized to the max hardware count minus the number of events, N, between
+* samples. Once the N events have occured, a HW counter overflow occurs
+* causing the generation of a HW counter interrupt which also stops the
+* writing of the SPU PC values to the trace buffer. Hence the last PC
+* written to the trace buffer is the SPU PC that we want. Unfortunately,
+* we have to read from the beginning of the trace buffer to get to the
+* last value written. We just hope the PPU has nothing better to do then
+* service this interrupt. The PC for the specific SPU being profiled is
+* extracted from the trace buffer processed and stored. The trace buffer
+* is cleared, interrupts are cleared, the counter is reset to max - N.
+* A kernel timer is used to periodically call the routine spu_evnt_swap()
+* to switch to the next physical SPU in the node to profile in round robbin
+* order. This way data is collected for all SPUs on the node. It does mean
+* that we need to use a relatively small value of N to ensure enough samples
+* on each SPU are collected each SPU is being profiled 1/8 of the time.
+* It may also be necessary to use a longer sample collection period.
 */
-static void cell_global_stop_spu(void)
+static void cell_handle_interrupt_spu(struct pt_regs *regs,
+struct op_counter_config *ctr)
 {
-int subfunc, rtn_value;
+u32 cpu, cpu_tmp;
-unsigned int lfsr_value;
+u64 trace_entry;
-int cpu;
+u32 interrupt_mask;
+u64 trace_buffer[2];
+u64 last_trace_buffer;
+u32 sample;
+u32 trace_addr;
+unsigned long sample_array_lock_flags;
+int spu_num;
+unsigned long flags;

-oprofile_running = 0;
+/* Make sure spu event interrupt handler and spu event swap
+* don't access the counters simultaneously.
+*/
+cpu = smp_processor_id();
+spin_lock_irqsave(&cntr_lock, flags);

-#ifdef CONFIG_CPU_FREQ
+cpu_tmp = cpu;
-cpufreq_unregister_notifier(&cpu_freq_notifier_block,
+cbe_disable_pm(cpu);
-CPUFREQ_TRANSITION_NOTIFIER);
-#endif

-for_each_online_cpu(cpu) {
+interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
-if (cbe_get_hw_thread_id(cpu))
-continue;

-subfunc = 3; /*
+sample = 0xABCDEF;
-* 2 - activate SPU tracing,
+trace_entry = 0xfedcba;
-* 3 - deactivate
+last_trace_buffer = 0xdeadbeaf;
-*/
-lfsr_value = 0x8f100000;

-rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
+if ((oprofile_running == 1) && (interrupt_mask != 0)) {
-subfunc, cbe_cpu_to_node(cpu),
+/* disable writes to trace buff */
-lfsr_value);
+cbe_write_pm(cpu, pm_interval, 0);

-if (unlikely(rtn_value != 0)) {
+/* only have one perf cntr being used, cntr 0 */
-printk(KERN_ERR
+if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
-"%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
+&& ctr[0].enabled)
-__func__, rtn_value);
+/* The SPU PC values will be read
+* from the trace buffer, reset counter
+*/
+
+cbe_write_ctr(cpu, 0, reset_value[0]);
+
+trace_addr = cbe_read_pm(cpu, trace_address);
+
+while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
+/* There is data in the trace buffer to process
+* Read the buffer until you get to the last
+* entry. This is the value we want.
+*/
+
+cbe_read_trace_buffer(cpu, trace_buffer);
+trace_addr = cbe_read_pm(cpu, trace_address);
 }

-/* Deactivate the signals */
+/* SPU Address 16 bit count format for 128 bit
-pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+* HW trace buffer is used for the SPU PC storage
+* HDR bits 0:15
+* SPU Addr 0 bits 16:31
+* SPU Addr 1 bits 32:47
+* unused bits 48:127
+*
+* HDR: bit4 = 1 SPU Address 0 valid
+* HDR: bit5 = 1 SPU Address 1 valid
+* - unfortunately, the valid bits don't seem to work
+*
+* Note trace_buffer[0] holds bits 0:63 of the HW
+* trace buffer, trace_buffer[1] holds bits 64:127
+*/
+
+trace_entry = trace_buffer[0]
+& 0x00000000FFFF0000;
+
+/* only top 16 of the 18 bit SPU PC address
+* is stored in trace buffer, hence shift right
+* by 16 -2 bits */
+sample = trace_entry >> 14;
+last_trace_buffer = trace_buffer[0];
+
+spu_num = spu_evnt_phys_spu_indx
++ (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
+
+/* make sure only one process at a time is calling
+* spu_sync_buffer()
+*/
+spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+sample_array_lock_flags);
+spu_sync_buffer(spu_num, &sample, 1);
+spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+sample_array_lock_flags);
+
+smp_wmb(); /* insure spu event buffer updates are written
+* don't want events intermingled... */
+
+/* The counters were frozen by the interrupt.
+* Reenable the interrupt and restart the counters.
+*/
+cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+cbe_enable_pm_interrupts(cpu, hdw_thread,
+virt_cntr_inter_mask);
+
+/* clear the trace buffer, re-enable writes to trace buff */
+cbe_write_pm(cpu, trace_address, 0);
+cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+
+/* The writes to the various performance counters only writes
+* to a latch. The new values (interrupt setting bits, reset
+* counter value etc.) are not copied to the actual registers
+* until the performance monitor is enabled. In order to get
+* this to work as desired, the permormance monitor needs to
+* be disabled while writing to the latches. This is a
+* HW design issue.
+*/
+write_pm_cntrl(cpu);
+cbe_enable_pm(cpu);
 }
+spin_unlock_irqrestore(&cntr_lock, flags);
-stop_spu_profiling();
 }

-static void cell_global_stop_ppu(void)
+static void cell_handle_interrupt_ppu(struct pt_regs *regs,
-{
+struct op_counter_config *ctr)
-int cpu;
-
-/*
-* This routine will be called once for the system.
-* There is one performance monitor per node, so we
-* only need to perform this function once per node.
-*/
-del_timer_sync(&timer_virt_cntr);
-oprofile_running = 0;
-smp_wmb();
-
-for_each_online_cpu(cpu) {
-if (cbe_get_hw_thread_id(cpu))
-continue;
-
-cbe_sync_irq(cbe_cpu_to_node(cpu));
-/* Stop the counters */
-cbe_disable_pm(cpu);
-
-/* Deactivate the signals */
-pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
-/* Deactivate interrupts */
-cbe_disable_pm_interrupts(cpu);
-}
-}
-
-static void cell_global_stop(void)
-{
-if (spu_cycle_reset)
-cell_global_stop_spu();
-else
-cell_global_stop_ppu();
-}
-
-static void cell_handle_interrupt(struct pt_regs *regs,
-struct op_counter_config *ctr)
 {
 u32 cpu;
 u64 pc;
@@ -1132,7 +1621,7 @@ static void cell_handle_interrupt(struct pt_regs *regs,
 * routine are not running at the same time. See the
 * cell_virtual_cntr() routine for additional comments.
 */
-spin_lock_irqsave(&virt_cntr_lock, flags);
+spin_lock_irqsave(&cntr_lock, flags);

 /*
 * Need to disable and reenable the performance counters
@@ -1185,7 +1674,16 @@ static void cell_handle_interrupt(struct pt_regs *regs,
 */
 cbe_enable_pm(cpu);
 }
-spin_unlock_irqrestore(&virt_cntr_lock, flags);
+spin_unlock_irqrestore(&cntr_lock, flags);
+}
+
+static void cell_handle_interrupt(struct pt_regs *regs,
+struct op_counter_config *ctr)
+{
+if (profiling_mode == PPU_PROFILING)
+cell_handle_interrupt_ppu(regs, ctr);
+else
+cell_handle_interrupt_spu(regs, ctr);
 }

 /*
@@ -1195,7 +1693,8 @@ static void cell_handle_interrupt(struct pt_regs *regs,
 */
 static int cell_sync_start(void)
 {
-if (spu_cycle_reset)
+if ((profiling_mode == SPU_PROFILING_CYCLES) ||
+(profiling_mode == SPU_PROFILING_EVENTS))
 return spu_sync_start();
 else
 return DO_GENERIC_SYNC;
@@ -1203,7 +1702,8 @@ static int cell_sync_start(void)

 static int cell_sync_stop(void)
 {
-if (spu_cycle_reset)
+if ((profiling_mode == SPU_PROFILING_CYCLES) ||
+(profiling_mode == SPU_PROFILING_EVENTS))
 return spu_sync_stop();
 else
 return 1;
@@ -2,7 +2,7 @@
 * @file op_model_amd.c
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
-* @remark Copyright 2002-2008 OProfile authors
+* @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
@@ -10,7 +10,7 @@
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf
 */

 #include <linux/oprofile.h>
 #include <linux/device.h>
@@ -60,53 +60,10 @@ static unsigned long reset_value[NUM_COUNTERS];
 #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */
 #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */

-/* Codes used in cpu_buffer.c */
-/* This produces duplicate code, need to be fixed */
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN 4
+#define IBS_FETCH_SIZE 6
+#define IBS_OP_SIZE 12

-/*
-* The function interface needs to be fixed, something like add
-* data. Should then be added to linux/oprofile.h.
-*/
-extern void
-oprofile_add_ibs_sample(struct pt_regs * const regs,
-unsigned int * const ibs_sample, int ibs_code);
-
-struct ibs_fetch_sample {
-/* MSRC001_1031 IBS Fetch Linear Address Register */
-unsigned int ibs_fetch_lin_addr_low;
-unsigned int ibs_fetch_lin_addr_high;
-/* MSRC001_1030 IBS Fetch Control Register */
-unsigned int ibs_fetch_ctl_low;
-unsigned int ibs_fetch_ctl_high;
-/* MSRC001_1032 IBS Fetch Physical Address Register */
-unsigned int ibs_fetch_phys_addr_low;
-unsigned int ibs_fetch_phys_addr_high;
-};
-
-struct ibs_op_sample {
-/* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */
-unsigned int ibs_op_rip_low;
-unsigned int ibs_op_rip_high;
-/* MSRC001_1035 IBS Op Data Register */
-unsigned int ibs_op_data1_low;
-unsigned int ibs_op_data1_high;
-/* MSRC001_1036 IBS Op Data 2 Register */
-unsigned int ibs_op_data2_low;
-unsigned int ibs_op_data2_high;
-/* MSRC001_1037 IBS Op Data 3 Register */
-unsigned int ibs_op_data3_low;
-unsigned int ibs_op_data3_high;
-/* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */
-unsigned int ibs_dc_linear_low;
-unsigned int ibs_dc_linear_high;
-/* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */
-unsigned int ibs_dc_phys_low;
-unsigned int ibs_dc_phys_high;
-};
-
-static int ibs_allowed; /* AMD Family10h and later */
+static int has_ibs; /* AMD Family10h and later */

 struct op_ibs_config {
 unsigned long op_enabled;
|
@ -197,31 +154,29 @@ static inline int
|
||||||
op_amd_handle_ibs(struct pt_regs * const regs,
|
op_amd_handle_ibs(struct pt_regs * const regs,
|
||||||
struct op_msrs const * const msrs)
|
struct op_msrs const * const msrs)
|
||||||
{
|
{
|
||||||
unsigned int low, high;
|
u32 low, high;
|
||||||
struct ibs_fetch_sample ibs_fetch;
|
u64 msr;
|
||||||
struct ibs_op_sample ibs_op;
|
struct op_entry entry;
|
||||||
|
|
||||||
if (!ibs_allowed)
|
if (!has_ibs)
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
if (ibs_config.fetch_enabled) {
|
if (ibs_config.fetch_enabled) {
|
||||||
rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
|
rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
|
||||||
if (high & IBS_FETCH_HIGH_VALID_BIT) {
|
if (high & IBS_FETCH_HIGH_VALID_BIT) {
|
||||||
ibs_fetch.ibs_fetch_ctl_high = high;
|
rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr);
|
||||||
ibs_fetch.ibs_fetch_ctl_low = low;
|
oprofile_write_reserve(&entry, regs, msr,
|
||||||
rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high);
|
IBS_FETCH_CODE, IBS_FETCH_SIZE);
|
||||||
ibs_fetch.ibs_fetch_lin_addr_high = high;
|
oprofile_add_data(&entry, (u32)msr);
|
||||||
ibs_fetch.ibs_fetch_lin_addr_low = low;
|
oprofile_add_data(&entry, (u32)(msr >> 32));
|
||||||
rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high);
|
oprofile_add_data(&entry, low);
|
||||||
ibs_fetch.ibs_fetch_phys_addr_high = high;
|
oprofile_add_data(&entry, high);
|
||||||
ibs_fetch.ibs_fetch_phys_addr_low = low;
|
rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr);
|
||||||
|
oprofile_add_data(&entry, (u32)msr);
|
||||||
oprofile_add_ibs_sample(regs,
|
oprofile_add_data(&entry, (u32)(msr >> 32));
|
||||||
(unsigned int *)&ibs_fetch,
|
oprofile_write_commit(&entry);
|
||||||
IBS_FETCH_BEGIN);
|
|
||||||
|
|
||||||
/* reenable the IRQ */
|
/* reenable the IRQ */
|
||||||
rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
|
|
||||||
high &= ~IBS_FETCH_HIGH_VALID_BIT;
|
high &= ~IBS_FETCH_HIGH_VALID_BIT;
|
||||||
high |= IBS_FETCH_HIGH_ENABLE;
|
high |= IBS_FETCH_HIGH_ENABLE;
|
||||||
low &= IBS_FETCH_LOW_MAX_CNT_MASK;
|
low &= IBS_FETCH_LOW_MAX_CNT_MASK;
|
||||||
|
@ -232,30 +187,29 @@ op_amd_handle_ibs(struct pt_regs * const regs,
|
||||||
if (ibs_config.op_enabled) {
|
if (ibs_config.op_enabled) {
|
||||||
rdmsr(MSR_AMD64_IBSOPCTL, low, high);
|
rdmsr(MSR_AMD64_IBSOPCTL, low, high);
|
||||||
if (low & IBS_OP_LOW_VALID_BIT) {
|
if (low & IBS_OP_LOW_VALID_BIT) {
|
||||||
rdmsr(MSR_AMD64_IBSOPRIP, low, high);
|
rdmsrl(MSR_AMD64_IBSOPRIP, msr);
|
||||||
ibs_op.ibs_op_rip_low = low;
|
oprofile_write_reserve(&entry, regs, msr,
|
||||||
ibs_op.ibs_op_rip_high = high;
|
IBS_OP_CODE, IBS_OP_SIZE);
|
||||||
rdmsr(MSR_AMD64_IBSOPDATA, low, high);
|
oprofile_add_data(&entry, (u32)msr);
|
||||||
ibs_op.ibs_op_data1_low = low;
|
oprofile_add_data(&entry, (u32)(msr >> 32));
|
||||||
ibs_op.ibs_op_data1_high = high;
|
rdmsrl(MSR_AMD64_IBSOPDATA, msr);
|
||||||
rdmsr(MSR_AMD64_IBSOPDATA2, low, high);
|
oprofile_add_data(&entry, (u32)msr);
|
||||||
ibs_op.ibs_op_data2_low = low;
|
oprofile_add_data(&entry, (u32)(msr >> 32));
|
||||||
ibs_op.ibs_op_data2_high = high;
|
rdmsrl(MSR_AMD64_IBSOPDATA2, msr);
|
||||||
rdmsr(MSR_AMD64_IBSOPDATA3, low, high);
|
oprofile_add_data(&entry, (u32)msr);
|
||||||
ibs_op.ibs_op_data3_low = low;
|
oprofile_add_data(&entry, (u32)(msr >> 32));
|
||||||
ibs_op.ibs_op_data3_high = high;
|
rdmsrl(MSR_AMD64_IBSOPDATA3, msr);
|
||||||
rdmsr(MSR_AMD64_IBSDCLINAD, low, high);
|
oprofile_add_data(&entry, (u32)msr);
|
||||||
ibs_op.ibs_dc_linear_low = low;
|
oprofile_add_data(&entry, (u32)(msr >> 32));
|
||||||
ibs_op.ibs_dc_linear_high = high;
|
rdmsrl(MSR_AMD64_IBSDCLINAD, msr);
|
||||||
rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high);
|
oprofile_add_data(&entry, (u32)msr);
|
||||||
ibs_op.ibs_dc_phys_low = low;
|
oprofile_add_data(&entry, (u32)(msr >> 32));
|
||||||
ibs_op.ibs_dc_phys_high = high;
|
rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr);
|
||||||
|
oprofile_add_data(&entry, (u32)msr);
|
||||||
|
oprofile_add_data(&entry, (u32)(msr >> 32));
|
||||||
|
oprofile_write_commit(&entry);
|
||||||
|
|
||||||
/* reenable the IRQ */
|
/* reenable the IRQ */
|
||||||
oprofile_add_ibs_sample(regs,
|
|
||||||
(unsigned int *)&ibs_op,
|
|
||||||
IBS_OP_BEGIN);
|
|
||||||
rdmsr(MSR_AMD64_IBSOPCTL, low, high);
|
|
||||||
high = 0;
|
high = 0;
|
||||||
low &= ~IBS_OP_LOW_VALID_BIT;
|
low &= ~IBS_OP_LOW_VALID_BIT;
|
||||||
low |= IBS_OP_LOW_ENABLE;
|
low |= IBS_OP_LOW_ENABLE;
|
||||||
|
@ -305,14 +259,14 @@ static void op_amd_start(struct op_msrs const * const msrs)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_OPROFILE_IBS
|
#ifdef CONFIG_OPROFILE_IBS
|
||||||
if (ibs_allowed && ibs_config.fetch_enabled) {
|
if (has_ibs && ibs_config.fetch_enabled) {
|
||||||
low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
|
low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
|
||||||
high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
|
high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
|
||||||
+ IBS_FETCH_HIGH_ENABLE;
|
+ IBS_FETCH_HIGH_ENABLE;
|
||||||
wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
|
wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ibs_allowed && ibs_config.op_enabled) {
|
if (has_ibs && ibs_config.op_enabled) {
|
||||||
low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
|
low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
|
||||||
+ ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
|
+ ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
|
||||||
+ IBS_OP_LOW_ENABLE;
|
+ IBS_OP_LOW_ENABLE;
|
||||||
|
@ -341,14 +295,14 @@ static void op_amd_stop(struct op_msrs const * const msrs)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_OPROFILE_IBS
|
#ifdef CONFIG_OPROFILE_IBS
|
||||||
if (ibs_allowed && ibs_config.fetch_enabled) {
|
if (has_ibs && ibs_config.fetch_enabled) {
|
||||||
/* clear max count and enable */
|
/* clear max count and enable */
|
||||||
low = 0;
|
low = 0;
|
||||||
high = 0;
|
high = 0;
|
||||||
wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
|
wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ibs_allowed && ibs_config.op_enabled) {
|
if (has_ibs && ibs_config.op_enabled) {
|
||||||
/* clear max count and enable */
|
/* clear max count and enable */
|
||||||
low = 0;
|
low = 0;
|
||||||
high = 0;
|
high = 0;
|
||||||
|
@ -409,6 +363,7 @@ static int init_ibs_nmi(void)
|
||||||
| IBSCTL_LVTOFFSETVAL);
|
| IBSCTL_LVTOFFSETVAL);
|
||||||
pci_read_config_dword(cpu_cfg, IBSCTL, &value);
|
pci_read_config_dword(cpu_cfg, IBSCTL, &value);
|
||||||
if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
|
if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
|
||||||
|
pci_dev_put(cpu_cfg);
|
||||||
printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
|
printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
|
||||||
"IBSCTL = 0x%08x", value);
|
"IBSCTL = 0x%08x", value);
|
||||||
return 1;
|
return 1;
|
||||||
|
@ -436,20 +391,20 @@ static int init_ibs_nmi(void)
|
||||||
/* uninitialize the APIC for the IBS interrupts if needed */
|
/* uninitialize the APIC for the IBS interrupts if needed */
|
||||||
static void clear_ibs_nmi(void)
|
static void clear_ibs_nmi(void)
|
||||||
{
|
{
|
||||||
if (ibs_allowed)
|
if (has_ibs)
|
||||||
on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
|
on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* initialize the APIC for the IBS interrupts if available */
|
/* initialize the APIC for the IBS interrupts if available */
|
||||||
static void ibs_init(void)
|
static void ibs_init(void)
|
||||||
{
|
{
|
||||||
ibs_allowed = boot_cpu_has(X86_FEATURE_IBS);
|
has_ibs = boot_cpu_has(X86_FEATURE_IBS);
|
||||||
|
|
||||||
if (!ibs_allowed)
|
if (!has_ibs)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (init_ibs_nmi()) {
|
if (init_ibs_nmi()) {
|
||||||
ibs_allowed = 0;
|
has_ibs = 0;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -458,7 +413,7 @@ static void ibs_init(void)
|
||||||
|
|
||||||
static void ibs_exit(void)
|
static void ibs_exit(void)
|
||||||
{
|
{
|
||||||
if (!ibs_allowed)
|
if (!has_ibs)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
clear_ibs_nmi();
|
clear_ibs_nmi();
|
||||||
|
@ -478,7 +433,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (!ibs_allowed)
|
if (!has_ibs)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
/* model specific files */
|
/* model specific files */
|
||||||
|
|
|
@ -1,11 +1,12 @@
|
||||||
/**
|
/**
|
||||||
* @file buffer_sync.c
|
* @file buffer_sync.c
|
||||||
*
|
*
|
||||||
* @remark Copyright 2002 OProfile authors
|
* @remark Copyright 2002-2009 OProfile authors
|
||||||
* @remark Read the file COPYING
|
* @remark Read the file COPYING
|
||||||
*
|
*
|
||||||
* @author John Levon <levon@movementarian.org>
|
* @author John Levon <levon@movementarian.org>
|
||||||
* @author Barry Kasindorf
|
* @author Barry Kasindorf
|
||||||
|
* @author Robert Richter <robert.richter@amd.com>
|
||||||
*
|
*
|
||||||
* This is the core of the buffer management. Each
|
* This is the core of the buffer management. Each
|
||||||
* CPU buffer is processed and entered into the
|
* CPU buffer is processed and entered into the
|
||||||
|
@ -315,88 +316,73 @@ static void add_trace_begin(void)
|
||||||
add_event_entry(TRACE_BEGIN_CODE);
|
add_event_entry(TRACE_BEGIN_CODE);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_OPROFILE_IBS
|
static void add_data(struct op_entry *entry, struct mm_struct *mm)
|
||||||
|
|
||||||
#define IBS_FETCH_CODE_SIZE 2
|
|
||||||
#define IBS_OP_CODE_SIZE 5
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Add IBS fetch and op entries to event buffer
|
|
||||||
*/
|
|
||||||
static void add_ibs_begin(int cpu, int code, struct mm_struct *mm)
|
|
||||||
{
|
{
|
||||||
unsigned long rip;
|
unsigned long code, pc, val;
|
||||||
int i, count;
|
unsigned long cookie;
|
||||||
unsigned long ibs_cookie = 0;
|
|
||||||
off_t offset;
|
off_t offset;
|
||||||
struct op_sample *sample;
|
|
||||||
|
|
||||||
sample = cpu_buffer_read_entry(cpu);
|
if (!op_cpu_buffer_get_data(entry, &code))
|
||||||
if (!sample)
|
return;
|
||||||
goto Error;
|
if (!op_cpu_buffer_get_data(entry, &pc))
|
||||||
rip = sample->eip;
|
return;
|
||||||
|
if (!op_cpu_buffer_get_size(entry))
|
||||||
#ifdef __LP64__
|
return;
|
||||||
rip += sample->event << 32;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
if (mm) {
|
if (mm) {
|
||||||
ibs_cookie = lookup_dcookie(mm, rip, &offset);
|
cookie = lookup_dcookie(mm, pc, &offset);
|
||||||
|
|
||||||
if (ibs_cookie == NO_COOKIE)
|
if (cookie == NO_COOKIE)
|
||||||
offset = rip;
|
offset = pc;
|
||||||
if (ibs_cookie == INVALID_COOKIE) {
|
if (cookie == INVALID_COOKIE) {
|
||||||
atomic_inc(&oprofile_stats.sample_lost_no_mapping);
|
atomic_inc(&oprofile_stats.sample_lost_no_mapping);
|
||||||
offset = rip;
|
offset = pc;
|
||||||
}
|
}
|
||||||
if (ibs_cookie != last_cookie) {
|
if (cookie != last_cookie) {
|
||||||
add_cookie_switch(ibs_cookie);
|
add_cookie_switch(cookie);
|
||||||
last_cookie = ibs_cookie;
|
last_cookie = cookie;
|
||||||
}
|
}
|
||||||
} else
|
} else
|
||||||
offset = rip;
|
offset = pc;
|
||||||
|
|
||||||
add_event_entry(ESCAPE_CODE);
|
add_event_entry(ESCAPE_CODE);
|
||||||
add_event_entry(code);
|
add_event_entry(code);
|
||||||
add_event_entry(offset); /* Offset from Dcookie */
|
add_event_entry(offset); /* Offset from Dcookie */
|
||||||
|
|
||||||
/* we send the Dcookie offset, but send the raw Linear Add also*/
|
while (op_cpu_buffer_get_data(entry, &val))
|
||||||
add_event_entry(sample->eip);
|
add_event_entry(val);
|
||||||
add_event_entry(sample->event);
|
|
||||||
|
|
||||||
if (code == IBS_FETCH_CODE)
|
|
||||||
count = IBS_FETCH_CODE_SIZE; /*IBS FETCH is 2 int64s*/
|
|
||||||
else
|
|
||||||
count = IBS_OP_CODE_SIZE; /*IBS OP is 5 int64s*/
|
|
||||||
|
|
||||||
for (i = 0; i < count; i++) {
|
|
||||||
sample = cpu_buffer_read_entry(cpu);
|
|
||||||
if (!sample)
|
|
||||||
goto Error;
|
|
||||||
add_event_entry(sample->eip);
|
|
||||||
add_event_entry(sample->event);
|
|
||||||
}
|
|
||||||
|
|
||||||
return;
|
|
||||||
|
|
||||||
Error:
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
static inline void add_sample_entry(unsigned long offset, unsigned long event)
|
||||||
|
|
||||||
static void add_sample_entry(unsigned long offset, unsigned long event)
|
|
||||||
{
|
{
|
||||||
add_event_entry(offset);
|
add_event_entry(offset);
|
||||||
add_event_entry(event);
|
add_event_entry(event);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
|
/*
|
||||||
|
* Add a sample to the global event buffer. If possible the
|
||||||
|
* sample is converted into a persistent dentry/offset pair
|
||||||
|
* for later lookup from userspace. Return 0 on failure.
|
||||||
|
*/
|
||||||
|
static int
|
||||||
|
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
|
||||||
{
|
{
|
||||||
unsigned long cookie;
|
unsigned long cookie;
|
||||||
off_t offset;
|
off_t offset;
|
||||||
|
|
||||||
|
if (in_kernel) {
|
||||||
|
add_sample_entry(s->eip, s->event);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* add userspace sample */
|
||||||
|
|
||||||
|
if (!mm) {
|
||||||
|
atomic_inc(&oprofile_stats.sample_lost_no_mm);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
cookie = lookup_dcookie(mm, s->eip, &offset);
|
cookie = lookup_dcookie(mm, s->eip, &offset);
|
||||||
|
|
||||||
if (cookie == INVALID_COOKIE) {
|
if (cookie == INVALID_COOKIE) {
|
||||||
|
@ -415,25 +401,6 @@ static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/* Add a sample to the global event buffer. If possible the
|
|
||||||
* sample is converted into a persistent dentry/offset pair
|
|
||||||
* for later lookup from userspace.
|
|
||||||
*/
|
|
||||||
static int
|
|
||||||
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
|
|
||||||
{
|
|
||||||
if (in_kernel) {
|
|
||||||
add_sample_entry(s->eip, s->event);
|
|
||||||
return 1;
|
|
||||||
} else if (mm) {
|
|
||||||
return add_us_sample(mm, s);
|
|
||||||
} else {
|
|
||||||
atomic_inc(&oprofile_stats.sample_lost_no_mm);
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
static void release_mm(struct mm_struct *mm)
|
static void release_mm(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
if (!mm)
|
if (!mm)
|
||||||
|
@ -526,66 +493,69 @@ void sync_buffer(int cpu)
|
||||||
{
|
{
|
||||||
struct mm_struct *mm = NULL;
|
struct mm_struct *mm = NULL;
|
||||||
struct mm_struct *oldmm;
|
struct mm_struct *oldmm;
|
||||||
|
unsigned long val;
|
||||||
struct task_struct *new;
|
struct task_struct *new;
|
||||||
unsigned long cookie = 0;
|
unsigned long cookie = 0;
|
||||||
int in_kernel = 1;
|
int in_kernel = 1;
|
||||||
sync_buffer_state state = sb_buffer_start;
|
sync_buffer_state state = sb_buffer_start;
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
unsigned long available;
|
unsigned long available;
|
||||||
|
unsigned long flags;
|
||||||
|
struct op_entry entry;
|
||||||
|
struct op_sample *sample;
|
||||||
|
|
||||||
mutex_lock(&buffer_mutex);
|
mutex_lock(&buffer_mutex);
|
||||||
|
|
||||||
add_cpu_switch(cpu);
|
add_cpu_switch(cpu);
|
||||||
|
|
||||||
cpu_buffer_reset(cpu);
|
op_cpu_buffer_reset(cpu);
|
||||||
available = cpu_buffer_entries(cpu);
|
available = op_cpu_buffer_entries(cpu);
|
||||||
|
|
||||||
for (i = 0; i < available; ++i) {
|
for (i = 0; i < available; ++i) {
|
||||||
struct op_sample *s = cpu_buffer_read_entry(cpu);
|
sample = op_cpu_buffer_read_entry(&entry, cpu);
|
||||||
if (!s)
|
if (!sample)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
if (is_code(s->eip)) {
|
if (is_code(sample->eip)) {
|
||||||
switch (s->event) {
|
flags = sample->event;
|
||||||
case 0:
|
if (flags & TRACE_BEGIN) {
|
||||||
case CPU_IS_KERNEL:
|
|
||||||
/* kernel/userspace switch */
|
|
||||||
in_kernel = s->event;
|
|
||||||
if (state == sb_buffer_start)
|
|
||||||
state = sb_sample_start;
|
|
||||||
add_kernel_ctx_switch(s->event);
|
|
||||||
break;
|
|
||||||
case CPU_TRACE_BEGIN:
|
|
||||||
state = sb_bt_start;
|
state = sb_bt_start;
|
||||||
add_trace_begin();
|
add_trace_begin();
|
||||||
break;
|
}
|
||||||
#ifdef CONFIG_OPROFILE_IBS
|
if (flags & KERNEL_CTX_SWITCH) {
|
||||||
case IBS_FETCH_BEGIN:
|
/* kernel/userspace switch */
|
||||||
state = sb_bt_start;
|
in_kernel = flags & IS_KERNEL;
|
||||||
add_ibs_begin(cpu, IBS_FETCH_CODE, mm);
|
if (state == sb_buffer_start)
|
||||||
break;
|
state = sb_sample_start;
|
||||||
case IBS_OP_BEGIN:
|
add_kernel_ctx_switch(flags & IS_KERNEL);
|
||||||
state = sb_bt_start;
|
}
|
||||||
add_ibs_begin(cpu, IBS_OP_CODE, mm);
|
if (flags & USER_CTX_SWITCH
|
||||||
break;
|
&& op_cpu_buffer_get_data(&entry, &val)) {
|
||||||
#endif
|
|
||||||
default:
|
|
||||||
/* userspace context switch */
|
/* userspace context switch */
|
||||||
|
new = (struct task_struct *)val;
|
||||||
oldmm = mm;
|
oldmm = mm;
|
||||||
new = (struct task_struct *)s->event;
|
|
||||||
release_mm(oldmm);
|
release_mm(oldmm);
|
||||||
mm = take_tasks_mm(new);
|
mm = take_tasks_mm(new);
|
||||||
if (mm != oldmm)
|
if (mm != oldmm)
|
||||||
cookie = get_exec_dcookie(mm);
|
cookie = get_exec_dcookie(mm);
|
||||||
add_user_ctx_switch(new, cookie);
|
add_user_ctx_switch(new, cookie);
|
||||||
break;
|
|
||||||
}
|
|
||||||
} else if (state >= sb_bt_start &&
|
|
||||||
!add_sample(mm, s, in_kernel)) {
|
|
||||||
if (state == sb_bt_start) {
|
|
||||||
state = sb_bt_ignore;
|
|
||||||
atomic_inc(&oprofile_stats.bt_lost_no_mapping);
|
|
||||||
}
|
}
|
||||||
|
if (op_cpu_buffer_get_size(&entry))
|
||||||
|
add_data(&entry, mm);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state < sb_bt_start)
|
||||||
|
/* ignore sample */
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (add_sample(mm, sample, in_kernel))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
/* ignore backtraces if failed to add a sample */
|
||||||
|
if (state == sb_bt_start) {
|
||||||
|
state = sb_bt_ignore;
|
||||||
|
atomic_inc(&oprofile_stats.bt_lost_no_mapping);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
release_mm(mm);
|
release_mm(mm);
|
||||||
|
|
|
@ -1,11 +1,12 @@
|
||||||
/**
|
/**
|
||||||
* @file cpu_buffer.c
|
* @file cpu_buffer.c
|
||||||
*
|
*
|
||||||
* @remark Copyright 2002 OProfile authors
|
* @remark Copyright 2002-2009 OProfile authors
|
||||||
* @remark Read the file COPYING
|
* @remark Read the file COPYING
|
||||||
*
|
*
|
||||||
* @author John Levon <levon@movementarian.org>
|
* @author John Levon <levon@movementarian.org>
|
||||||
* @author Barry Kasindorf <barry.kasindorf@amd.com>
|
* @author Barry Kasindorf <barry.kasindorf@amd.com>
|
||||||
|
* @author Robert Richter <robert.richter@amd.com>
|
||||||
*
|
*
|
||||||
* Each CPU has a local buffer that stores PC value/event
|
* Each CPU has a local buffer that stores PC value/event
|
||||||
* pairs. We also log context switches when we notice them.
|
* pairs. We also log context switches when we notice them.
|
||||||
|
@ -45,8 +46,8 @@
|
||||||
* can be changed to a single buffer solution when the ring buffer
|
* can be changed to a single buffer solution when the ring buffer
|
||||||
* access is implemented as non-locking atomic code.
|
* access is implemented as non-locking atomic code.
|
||||||
*/
|
*/
|
||||||
struct ring_buffer *op_ring_buffer_read;
|
static struct ring_buffer *op_ring_buffer_read;
|
||||||
struct ring_buffer *op_ring_buffer_write;
|
static struct ring_buffer *op_ring_buffer_write;
|
||||||
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
|
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
|
||||||
|
|
||||||
static void wq_sync_buffer(struct work_struct *work);
|
static void wq_sync_buffer(struct work_struct *work);
|
||||||
|
@ -54,6 +55,19 @@ static void wq_sync_buffer(struct work_struct *work);
|
||||||
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
|
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
|
||||||
static int work_enabled;
|
static int work_enabled;
|
||||||
|
|
||||||
|
unsigned long oprofile_get_cpu_buffer_size(void)
|
||||||
|
{
|
||||||
|
return oprofile_cpu_buffer_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
void oprofile_cpu_buffer_inc_smpl_lost(void)
|
||||||
|
{
|
||||||
|
struct oprofile_cpu_buffer *cpu_buf
|
||||||
|
= &__get_cpu_var(cpu_buffer);
|
||||||
|
|
||||||
|
cpu_buf->sample_lost_overflow++;
|
||||||
|
}
|
||||||
|
|
||||||
void free_cpu_buffers(void)
|
void free_cpu_buffers(void)
|
||||||
{
|
{
|
||||||
if (op_ring_buffer_read)
|
if (op_ring_buffer_read)
|
||||||
|
@ -64,24 +78,11 @@ void free_cpu_buffers(void)
|
||||||
op_ring_buffer_write = NULL;
|
op_ring_buffer_write = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned long oprofile_get_cpu_buffer_size(void)
|
|
||||||
{
|
|
||||||
return fs_cpu_buffer_size;
|
|
||||||
}
|
|
||||||
|
|
||||||
void oprofile_cpu_buffer_inc_smpl_lost(void)
|
|
||||||
{
|
|
||||||
struct oprofile_cpu_buffer *cpu_buf
|
|
||||||
= &__get_cpu_var(cpu_buffer);
|
|
||||||
|
|
||||||
cpu_buf->sample_lost_overflow++;
|
|
||||||
}
|
|
||||||
|
|
||||||
int alloc_cpu_buffers(void)
|
int alloc_cpu_buffers(void)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
unsigned long buffer_size = fs_cpu_buffer_size;
|
unsigned long buffer_size = oprofile_cpu_buffer_size;
|
||||||
|
|
||||||
op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
|
op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
|
||||||
if (!op_ring_buffer_read)
|
if (!op_ring_buffer_read)
|
||||||
|
@ -97,8 +98,6 @@ int alloc_cpu_buffers(void)
|
||||||
b->last_is_kernel = -1;
|
b->last_is_kernel = -1;
|
||||||
b->tracing = 0;
|
b->tracing = 0;
|
||||||
b->buffer_size = buffer_size;
|
b->buffer_size = buffer_size;
|
||||||
b->tail_pos = 0;
|
|
||||||
b->head_pos = 0;
|
|
||||||
b->sample_received = 0;
|
b->sample_received = 0;
|
||||||
b->sample_lost_overflow = 0;
|
b->sample_lost_overflow = 0;
|
||||||
b->backtrace_aborted = 0;
|
b->backtrace_aborted = 0;
|
||||||
|
@ -145,47 +144,156 @@ void end_cpu_work(void)
|
||||||
flush_scheduled_work();
|
flush_scheduled_work();
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int
|
/*
|
||||||
add_sample(struct oprofile_cpu_buffer *cpu_buf,
|
* This function prepares the cpu buffer to write a sample.
|
||||||
unsigned long pc, unsigned long event)
|
*
|
||||||
|
* Struct op_entry is used during operations on the ring buffer while
|
||||||
|
* struct op_sample contains the data that is stored in the ring
|
||||||
|
* buffer. Struct entry can be uninitialized. The function reserves a
|
||||||
|
* data array that is specified by size. Use
|
||||||
|
* op_cpu_buffer_write_commit() after preparing the sample. In case of
|
||||||
|
* errors a null pointer is returned, otherwise the pointer to the
|
||||||
|
* sample is returned.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
struct op_sample
|
||||||
|
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
|
||||||
|
{
|
||||||
|
entry->event = ring_buffer_lock_reserve
|
||||||
|
(op_ring_buffer_write, sizeof(struct op_sample) +
|
||||||
|
size * sizeof(entry->sample->data[0]), &entry->irq_flags);
|
||||||
|
if (entry->event)
|
||||||
|
entry->sample = ring_buffer_event_data(entry->event);
|
||||||
|
else
|
||||||
|
entry->sample = NULL;
|
||||||
|
|
||||||
|
if (!entry->sample)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
entry->size = size;
|
||||||
|
entry->data = entry->sample->data;
|
||||||
|
|
||||||
|
return entry->sample;
|
||||||
|
}
|
||||||
|
|
||||||
|
int op_cpu_buffer_write_commit(struct op_entry *entry)
|
||||||
|
{
|
||||||
|
return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
|
||||||
|
entry->irq_flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
|
||||||
|
{
|
||||||
|
struct ring_buffer_event *e;
|
||||||
|
e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
|
||||||
|
if (e)
|
||||||
|
goto event;
|
||||||
|
if (ring_buffer_swap_cpu(op_ring_buffer_read,
|
||||||
|
op_ring_buffer_write,
|
||||||
|
cpu))
|
||||||
|
return NULL;
|
||||||
|
e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
|
||||||
|
if (e)
|
||||||
|
goto event;
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
event:
|
||||||
|
entry->event = e;
|
||||||
|
entry->sample = ring_buffer_event_data(e);
|
||||||
|
entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
|
||||||
|
/ sizeof(entry->sample->data[0]);
|
||||||
|
entry->data = entry->sample->data;
|
||||||
|
return entry->sample;
|
||||||
|
}
|
||||||
|
|
||||||
|
unsigned long op_cpu_buffer_entries(int cpu)
|
||||||
|
{
|
||||||
|
return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
|
||||||
|
+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
|
||||||
|
}
|
||||||
|
|
||||||
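A minimal usage sketch for the reserve/add/commit interface introduced above, assuming the op_cpu_buffer_add_data() helper this series adds to cpu_buffer.h (shown further down in this diff); the function name and the two payload words are illustrative, not part of the patch:

/*
 * Illustrative sketch only: log one pc/event pair followed by two extra
 * data words through the new cpu buffer API, the same pattern used by
 * op_add_code() and op_add_sample() below.
 */
static int example_add_sample_with_data(unsigned long pc, unsigned long event,
					unsigned long d0, unsigned long d1)
{
	struct op_entry entry;
	struct op_sample *sample;

	/* reserve the sample header plus room for two data words */
	sample = op_cpu_buffer_write_reserve(&entry, 2);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	/* each call stores one word and shrinks the remaining size */
	op_cpu_buffer_add_data(&entry, d0);
	op_cpu_buffer_add_data(&entry, d1);

	return op_cpu_buffer_write_commit(&entry);
}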
|
static int
|
||||||
|
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
|
||||||
|
int is_kernel, struct task_struct *task)
|
||||||
{
|
{
|
||||||
struct op_entry entry;
|
struct op_entry entry;
|
||||||
int ret;
|
struct op_sample *sample;
|
||||||
|
unsigned long flags;
|
||||||
|
int size;
|
||||||
|
|
||||||
ret = cpu_buffer_write_entry(&entry);
|
flags = 0;
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
entry.sample->eip = pc;
|
if (backtrace)
|
||||||
entry.sample->event = event;
|
flags |= TRACE_BEGIN;
|
||||||
|
|
||||||
ret = cpu_buffer_write_commit(&entry);
|
/* notice a switch from user->kernel or vice versa */
|
||||||
if (ret)
|
is_kernel = !!is_kernel;
|
||||||
return ret;
|
if (cpu_buf->last_is_kernel != is_kernel) {
|
||||||
|
cpu_buf->last_is_kernel = is_kernel;
|
||||||
|
flags |= KERNEL_CTX_SWITCH;
|
||||||
|
if (is_kernel)
|
||||||
|
flags |= IS_KERNEL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* notice a task switch */
|
||||||
|
if (cpu_buf->last_task != task) {
|
||||||
|
cpu_buf->last_task = task;
|
||||||
|
flags |= USER_CTX_SWITCH;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!flags)
|
||||||
|
/* nothing to do */
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
if (flags & USER_CTX_SWITCH)
|
||||||
|
size = 1;
|
||||||
|
else
|
||||||
|
size = 0;
|
||||||
|
|
||||||
|
sample = op_cpu_buffer_write_reserve(&entry, size);
|
||||||
|
if (!sample)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
sample->eip = ESCAPE_CODE;
|
||||||
|
sample->event = flags;
|
||||||
|
|
||||||
|
if (size)
|
||||||
|
op_cpu_buffer_add_data(&entry, (unsigned long)task);
|
||||||
|
|
||||||
|
op_cpu_buffer_write_commit(&entry);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
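For reference, the escape record that op_add_code() above emits when it sees both a kernel/user transition and a task switch can be pictured as the initializer below; this is only an illustration and assumes the struct op_sample layout and the flag macros from cpu_buffer.h later in this diff:

/*
 * Illustrative layout only: op_add_code() writes ESCAPE_CODE as the pc and
 * the flag word as the event. Because USER_CTX_SWITCH is set, one data word
 * holding the new task pointer follows, and sync_buffer() in buffer_sync.c
 * reads it back with op_cpu_buffer_get_data().
 */
static const struct op_sample example_ctx_switch_record = {
	.eip   = ESCAPE_CODE,
	.event = KERNEL_CTX_SWITCH | IS_KERNEL | USER_CTX_SWITCH,
	/* data[0] = (unsigned long)current, appended via op_cpu_buffer_add_data() */
};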
|
||||||
static inline int
|
static inline int
|
||||||
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
|
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
|
||||||
|
unsigned long pc, unsigned long event)
|
||||||
{
|
{
|
||||||
return add_sample(buffer, ESCAPE_CODE, value);
|
struct op_entry entry;
|
||||||
|
struct op_sample *sample;
|
||||||
|
|
||||||
|
sample = op_cpu_buffer_write_reserve(&entry, 0);
|
||||||
|
if (!sample)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
sample->eip = pc;
|
||||||
|
sample->event = event;
|
||||||
|
|
||||||
|
return op_cpu_buffer_write_commit(&entry);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* This must be safe from any context. It's safe writing here
|
/*
|
||||||
* because of the head/tail separation of the writer and reader
|
* This must be safe from any context.
|
||||||
* of the CPU buffer.
|
|
||||||
*
|
*
|
||||||
* is_kernel is needed because on some architectures you cannot
|
* is_kernel is needed because on some architectures you cannot
|
||||||
* tell if you are in kernel or user space simply by looking at
|
* tell if you are in kernel or user space simply by looking at
|
||||||
* pc. We tag this in the buffer by generating kernel enter/exit
|
* pc. We tag this in the buffer by generating kernel enter/exit
|
||||||
* events whenever is_kernel changes
|
* events whenever is_kernel changes
|
||||||
*/
|
*/
|
||||||
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
|
static int
|
||||||
int is_kernel, unsigned long event)
|
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
|
||||||
|
unsigned long backtrace, int is_kernel, unsigned long event)
|
||||||
{
|
{
|
||||||
struct task_struct *task;
|
|
||||||
|
|
||||||
cpu_buf->sample_received++;
|
cpu_buf->sample_received++;
|
||||||
|
|
||||||
if (pc == ESCAPE_CODE) {
|
if (pc == ESCAPE_CODE) {
|
||||||
|
@ -193,25 +301,10 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
is_kernel = !!is_kernel;
|
if (op_add_code(cpu_buf, backtrace, is_kernel, current))
|
||||||
|
goto fail;
|
||||||
|
|
||||||
task = current;
|
if (op_add_sample(cpu_buf, pc, event))
|
||||||
|
|
||||||
/* notice a switch from user->kernel or vice versa */
|
|
||||||
if (cpu_buf->last_is_kernel != is_kernel) {
|
|
||||||
cpu_buf->last_is_kernel = is_kernel;
|
|
||||||
if (add_code(cpu_buf, is_kernel))
|
|
||||||
goto fail;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* notice a task switch */
|
|
||||||
if (cpu_buf->last_task != task) {
|
|
||||||
cpu_buf->last_task = task;
|
|
||||||
if (add_code(cpu_buf, (unsigned long)task))
|
|
||||||
goto fail;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (add_sample(cpu_buf, pc, event))
|
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
return 1;
|
return 1;
|
||||||
|
@ -221,109 +314,102 @@ fail:
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
|
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
|
||||||
{
|
{
|
||||||
add_code(cpu_buf, CPU_TRACE_BEGIN);
|
|
||||||
cpu_buf->tracing = 1;
|
cpu_buf->tracing = 1;
|
||||||
return 1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
|
static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
|
||||||
{
|
{
|
||||||
cpu_buf->tracing = 0;
|
cpu_buf->tracing = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
|
static inline void
|
||||||
unsigned long event, int is_kernel)
|
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
|
||||||
|
unsigned long event, int is_kernel)
|
||||||
{
|
{
|
||||||
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
|
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
|
||||||
|
unsigned long backtrace = oprofile_backtrace_depth;
|
||||||
if (!backtrace_depth) {
|
|
||||||
log_sample(cpu_buf, pc, is_kernel, event);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!oprofile_begin_trace(cpu_buf))
|
|
||||||
return;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* if log_sample() fails we can't backtrace since we lost the
|
* if log_sample() fails we can't backtrace since we lost the
|
||||||
* source of this event
|
* source of this event
|
||||||
*/
|
*/
|
||||||
if (log_sample(cpu_buf, pc, is_kernel, event))
|
if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
|
||||||
oprofile_ops.backtrace(regs, backtrace_depth);
|
/* failed */
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (!backtrace)
|
||||||
|
return;
|
||||||
|
|
||||||
|
oprofile_begin_trace(cpu_buf);
|
||||||
|
oprofile_ops.backtrace(regs, backtrace);
|
||||||
oprofile_end_trace(cpu_buf);
|
oprofile_end_trace(cpu_buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
|
||||||
|
unsigned long event, int is_kernel)
|
||||||
|
{
|
||||||
|
__oprofile_add_ext_sample(pc, regs, event, is_kernel);
|
||||||
|
}
|
||||||
|
|
||||||
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
|
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
|
||||||
{
|
{
|
||||||
int is_kernel = !user_mode(regs);
|
int is_kernel = !user_mode(regs);
|
||||||
unsigned long pc = profile_pc(regs);
|
unsigned long pc = profile_pc(regs);
|
||||||
|
|
||||||
oprofile_add_ext_sample(pc, regs, event, is_kernel);
|
__oprofile_add_ext_sample(pc, regs, event, is_kernel);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_OPROFILE_IBS
|
/*
|
||||||
|
* Add samples with data to the ring buffer.
|
||||||
#define MAX_IBS_SAMPLE_SIZE 14
|
*
|
||||||
|
* Use oprofile_add_data(&entry, val) to add data and
|
||||||
void oprofile_add_ibs_sample(struct pt_regs * const regs,
|
* oprofile_write_commit(&entry) to commit the sample.
|
||||||
unsigned int * const ibs_sample, int ibs_code)
|
*/
|
||||||
|
void
|
||||||
|
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
|
||||||
|
unsigned long pc, int code, int size)
|
||||||
{
|
{
|
||||||
|
struct op_sample *sample;
|
||||||
int is_kernel = !user_mode(regs);
|
int is_kernel = !user_mode(regs);
|
||||||
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
|
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
|
||||||
struct task_struct *task;
|
|
||||||
int fail = 0;
|
|
||||||
|
|
||||||
cpu_buf->sample_received++;
|
cpu_buf->sample_received++;
|
||||||
|
|
||||||
/* notice a switch from user->kernel or vice versa */
|
/* no backtraces for samples with data */
|
||||||
if (cpu_buf->last_is_kernel != is_kernel) {
|
if (op_add_code(cpu_buf, 0, is_kernel, current))
|
||||||
if (add_code(cpu_buf, is_kernel))
|
|
||||||
goto fail;
|
|
||||||
cpu_buf->last_is_kernel = is_kernel;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* notice a task switch */
|
|
||||||
if (!is_kernel) {
|
|
||||||
task = current;
|
|
||||||
if (cpu_buf->last_task != task) {
|
|
||||||
if (add_code(cpu_buf, (unsigned long)task))
|
|
||||||
goto fail;
|
|
||||||
cpu_buf->last_task = task;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fail = fail || add_code(cpu_buf, ibs_code);
|
|
||||||
fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
|
|
||||||
fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
|
|
||||||
fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
|
|
||||||
|
|
||||||
if (ibs_code == IBS_OP_BEGIN) {
|
|
||||||
fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
|
|
||||||
fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
|
|
||||||
fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (fail)
|
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
if (backtrace_depth)
|
sample = op_cpu_buffer_write_reserve(entry, size + 2);
|
||||||
oprofile_ops.backtrace(regs, backtrace_depth);
|
if (!sample)
|
||||||
|
goto fail;
|
||||||
|
sample->eip = ESCAPE_CODE;
|
||||||
|
sample->event = 0; /* no flags */
|
||||||
|
|
||||||
|
op_cpu_buffer_add_data(entry, code);
|
||||||
|
op_cpu_buffer_add_data(entry, pc);
|
||||||
|
|
||||||
return;
|
return;
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
cpu_buf->sample_lost_overflow++;
|
cpu_buf->sample_lost_overflow++;
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
int oprofile_add_data(struct op_entry *entry, unsigned long val)
|
||||||
|
{
|
||||||
|
return op_cpu_buffer_add_data(entry, val);
|
||||||
|
}
|
||||||
|
|
||||||
|
int oprofile_write_commit(struct op_entry *entry)
|
||||||
|
{
|
||||||
|
return op_cpu_buffer_write_commit(entry);
|
||||||
|
}
|
||||||
|
|
||||||
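The exported trio above is the driver-facing side of the same API; a condensed sketch of the calling convention, mirroring op_amd_handle_ibs() earlier in this diff (the function name and the single 64-bit payload are assumptions for illustration):

/*
 * Illustrative sketch only: oprofile_write_reserve() reserves size + 2 data
 * words and stores the code and pc itself, so a model driver only appends
 * its own payload before committing.
 */
static void example_log_extended_sample(struct pt_regs * const regs,
					unsigned long pc, int code, u64 payload)
{
	struct op_entry entry;

	oprofile_write_reserve(&entry, regs, pc, code, 2);
	oprofile_add_data(&entry, (u32)payload);
	oprofile_add_data(&entry, (u32)(payload >> 32));
	oprofile_write_commit(&entry);
}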
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
|
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
|
||||||
{
|
{
|
||||||
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
|
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
|
||||||
log_sample(cpu_buf, pc, is_kernel, event);
|
log_sample(cpu_buf, pc, 0, is_kernel, event);
|
||||||
}
|
}
|
||||||
|
|
||||||
void oprofile_add_trace(unsigned long pc)
|
void oprofile_add_trace(unsigned long pc)
|
||||||
|
@ -340,7 +426,7 @@ void oprofile_add_trace(unsigned long pc)
|
||||||
if (pc == ESCAPE_CODE)
|
if (pc == ESCAPE_CODE)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
if (add_sample(cpu_buf, pc, 0))
|
if (op_add_sample(cpu_buf, pc, 0))
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
return;
|
return;
|
||||||
|
|
|
@ -1,10 +1,11 @@
|
||||||
/**
|
/**
|
||||||
* @file cpu_buffer.h
|
* @file cpu_buffer.h
|
||||||
*
|
*
|
||||||
* @remark Copyright 2002 OProfile authors
|
* @remark Copyright 2002-2009 OProfile authors
|
||||||
* @remark Read the file COPYING
|
* @remark Read the file COPYING
|
||||||
*
|
*
|
||||||
* @author John Levon <levon@movementarian.org>
|
* @author John Levon <levon@movementarian.org>
|
||||||
|
* @author Robert Richter <robert.richter@amd.com>
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifndef OPROFILE_CPU_BUFFER_H
|
#ifndef OPROFILE_CPU_BUFFER_H
|
||||||
|
@ -31,17 +32,12 @@ void end_cpu_work(void);
|
||||||
struct op_sample {
|
struct op_sample {
|
||||||
unsigned long eip;
|
unsigned long eip;
|
||||||
unsigned long event;
|
unsigned long event;
|
||||||
|
unsigned long data[0];
|
||||||
};
|
};
|
||||||
|
|
||||||
struct op_entry {
|
struct op_entry;
|
||||||
struct ring_buffer_event *event;
|
|
||||||
struct op_sample *sample;
|
|
||||||
unsigned long irq_flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct oprofile_cpu_buffer {
|
struct oprofile_cpu_buffer {
|
||||||
volatile unsigned long head_pos;
|
|
||||||
volatile unsigned long tail_pos;
|
|
||||||
unsigned long buffer_size;
|
unsigned long buffer_size;
|
||||||
struct task_struct *last_task;
|
struct task_struct *last_task;
|
||||||
int last_is_kernel;
|
int last_is_kernel;
|
||||||
|
@ -54,8 +50,6 @@ struct oprofile_cpu_buffer {
|
||||||
struct delayed_work work;
|
struct delayed_work work;
|
||||||
};
|
};
|
||||||
|
|
||||||
extern struct ring_buffer *op_ring_buffer_read;
|
|
||||||
extern struct ring_buffer *op_ring_buffer_write;
|
|
||||||
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
|
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -64,7 +58,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
|
||||||
* reset these to invalid values; the next sample collected will
|
* reset these to invalid values; the next sample collected will
|
||||||
* populate the buffer with proper values to initialize the buffer
|
* populate the buffer with proper values to initialize the buffer
|
||||||
*/
|
*/
|
||||||
static inline void cpu_buffer_reset(int cpu)
|
static inline void op_cpu_buffer_reset(int cpu)
|
||||||
{
|
{
|
||||||
struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
|
struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
|
||||||
|
|
||||||
|
@ -72,55 +66,48 @@ static inline void cpu_buffer_reset(int cpu)
|
||||||
cpu_buf->last_task = NULL;
|
cpu_buf->last_task = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int cpu_buffer_write_entry(struct op_entry *entry)
|
struct op_sample
|
||||||
|
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
|
||||||
|
int op_cpu_buffer_write_commit(struct op_entry *entry);
|
||||||
|
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
|
||||||
|
unsigned long op_cpu_buffer_entries(int cpu);
|
||||||
|
|
||||||
|
/* returns the remaining free size of data in the entry */
|
||||||
|
static inline
|
||||||
|
int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
|
||||||
{
|
{
|
||||||
entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
|
if (!entry->size)
|
||||||
sizeof(struct op_sample),
|
return 0;
|
||||||
&entry->irq_flags);
|
*entry->data = val;
|
||||||
if (entry->event)
|
entry->size--;
|
||||||
entry->sample = ring_buffer_event_data(entry->event);
|
entry->data++;
|
||||||
else
|
return entry->size;
|
||||||
entry->sample = NULL;
|
|
||||||
|
|
||||||
if (!entry->sample)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int cpu_buffer_write_commit(struct op_entry *entry)
|
/* returns the size of data in the entry */
|
||||||
|
static inline
|
||||||
|
int op_cpu_buffer_get_size(struct op_entry *entry)
|
||||||
{
|
{
|
||||||
return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
|
return entry->size;
|
||||||
entry->irq_flags);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline struct op_sample *cpu_buffer_read_entry(int cpu)
|
/* returns 0 if empty or the size of data including the current value */
|
||||||
|
static inline
|
||||||
|
int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
|
||||||
{
|
{
|
||||||
struct ring_buffer_event *e;
|
int size = entry->size;
|
||||||
e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
|
if (!size)
|
||||||
if (e)
|
return 0;
|
||||||
return ring_buffer_event_data(e);
|
*val = *entry->data;
|
||||||
if (ring_buffer_swap_cpu(op_ring_buffer_read,
|
entry->size--;
|
||||||
op_ring_buffer_write,
|
entry->data++;
|
||||||
cpu))
|
return size;
|
||||||
return NULL;
|
|
||||||
e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
|
|
||||||
if (e)
|
|
||||||
return ring_buffer_event_data(e);
|
|
||||||
return NULL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* "acquire" as many cpu buffer slots as we can */
|
/* extra data flags */
|
||||||
static inline unsigned long cpu_buffer_entries(int cpu)
|
#define KERNEL_CTX_SWITCH (1UL << 0)
|
||||||
{
|
#define IS_KERNEL (1UL << 1)
|
||||||
return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
|
#define TRACE_BEGIN (1UL << 2)
|
||||||
+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
|
#define USER_CTX_SWITCH (1UL << 3)
|
||||||
}
|
|
||||||
|
|
||||||
/* transient events for the CPU buffer -> event buffer */
|
|
||||||
#define CPU_IS_KERNEL 1
|
|
||||||
#define CPU_TRACE_BEGIN 2
|
|
||||||
#define IBS_FETCH_BEGIN 3
|
|
||||||
#define IBS_OP_BEGIN 4
|
|
||||||
|
|
||||||
#endif /* OPROFILE_CPU_BUFFER_H */
|
#endif /* OPROFILE_CPU_BUFFER_H */
|
||||||
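To close the header out, a hedged sketch of the consumer side, following the same steps sync_buffer() uses to drain one CPU's buffer with the helpers declared above (the function name and the pr_debug() reporting are illustrative assumptions):

/*
 * Illustrative sketch only: read every pending sample of one cpu and drain
 * the optional data words attached to it.
 */
static unsigned long example_drain_cpu(int cpu)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long val, seen = 0;
	unsigned long i, available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; i++) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;
		seen++;
		/* op_cpu_buffer_get_data() returns 0 once the entry is empty */
		while (op_cpu_buffer_get_data(&entry, &val))
			pr_debug("oprofile: data word 0x%lx\n", val);
	}
	return seen;
}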
|
|
|
@ -73,8 +73,8 @@ int alloc_event_buffer(void)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(&oprofilefs_lock, flags);
|
spin_lock_irqsave(&oprofilefs_lock, flags);
|
||||||
buffer_size = fs_buffer_size;
|
buffer_size = oprofile_buffer_size;
|
||||||
buffer_watershed = fs_buffer_watershed;
|
buffer_watershed = oprofile_buffer_watershed;
|
||||||
spin_unlock_irqrestore(&oprofilefs_lock, flags);
|
spin_unlock_irqrestore(&oprofilefs_lock, flags);
|
||||||
|
|
||||||
if (buffer_watershed >= buffer_size)
|
if (buffer_watershed >= buffer_size)
|
||||||
|
|
|
@ -23,7 +23,7 @@
|
||||||
struct oprofile_operations oprofile_ops;
|
struct oprofile_operations oprofile_ops;
|
||||||
|
|
||||||
unsigned long oprofile_started;
|
unsigned long oprofile_started;
|
||||||
unsigned long backtrace_depth;
|
unsigned long oprofile_backtrace_depth;
|
||||||
static unsigned long is_setup;
|
static unsigned long is_setup;
|
||||||
static DEFINE_MUTEX(start_mutex);
|
static DEFINE_MUTEX(start_mutex);
|
||||||
|
|
||||||
|
@ -172,7 +172,7 @@ int oprofile_set_backtrace(unsigned long val)
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
backtrace_depth = val;
|
oprofile_backtrace_depth = val;
|
||||||
|
|
||||||
out:
|
out:
|
||||||
mutex_unlock(&start_mutex);
|
mutex_unlock(&start_mutex);
|
||||||
|
|
|
@ -21,12 +21,12 @@ void oprofile_stop(void);
|
||||||
|
|
||||||
struct oprofile_operations;
|
struct oprofile_operations;
|
||||||
|
|
||||||
extern unsigned long fs_buffer_size;
|
extern unsigned long oprofile_buffer_size;
|
||||||
extern unsigned long fs_cpu_buffer_size;
|
extern unsigned long oprofile_cpu_buffer_size;
|
||||||
extern unsigned long fs_buffer_watershed;
|
extern unsigned long oprofile_buffer_watershed;
|
||||||
extern struct oprofile_operations oprofile_ops;
|
extern struct oprofile_operations oprofile_ops;
|
||||||
extern unsigned long oprofile_started;
|
extern unsigned long oprofile_started;
|
||||||
extern unsigned long backtrace_depth;
|
extern unsigned long oprofile_backtrace_depth;
|
||||||
|
|
||||||
struct super_block;
|
struct super_block;
|
||||||
struct dentry;
|
struct dentry;
|
||||||
|
|
|
@ -14,17 +14,18 @@
|
||||||
#include "oprofile_stats.h"
|
#include "oprofile_stats.h"
|
||||||
#include "oprof.h"
|
#include "oprof.h"
|
||||||
|
|
||||||
#define FS_BUFFER_SIZE_DEFAULT 131072
|
#define BUFFER_SIZE_DEFAULT 131072
|
||||||
#define FS_CPU_BUFFER_SIZE_DEFAULT 8192
|
#define CPU_BUFFER_SIZE_DEFAULT 8192
|
||||||
#define FS_BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
|
#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
|
||||||
|
|
||||||
unsigned long fs_buffer_size;
|
unsigned long oprofile_buffer_size;
|
||||||
unsigned long fs_cpu_buffer_size;
|
unsigned long oprofile_cpu_buffer_size;
|
||||||
unsigned long fs_buffer_watershed;
|
unsigned long oprofile_buffer_watershed;
|
||||||
|
|
||||||
static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
|
static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
|
||||||
{
|
{
|
||||||
return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
|
return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
|
||||||
|
offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -125,16 +126,16 @@ static const struct file_operations dump_fops = {
|
||||||
void oprofile_create_files(struct super_block *sb, struct dentry *root)
|
void oprofile_create_files(struct super_block *sb, struct dentry *root)
|
||||||
{
|
{
|
||||||
/* reinitialize default values */
|
/* reinitialize default values */
|
||||||
fs_buffer_size = FS_BUFFER_SIZE_DEFAULT;
|
oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
|
||||||
fs_cpu_buffer_size = FS_CPU_BUFFER_SIZE_DEFAULT;
|
oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT;
|
||||||
fs_buffer_watershed = FS_BUFFER_WATERSHED_DEFAULT;
|
oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
|
||||||
|
|
||||||
oprofilefs_create_file(sb, root, "enable", &enable_fops);
|
oprofilefs_create_file(sb, root, "enable", &enable_fops);
|
||||||
oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
|
oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
|
||||||
oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
|
oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
|
||||||
oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
|
oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
|
||||||
oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
|
oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
|
||||||
oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
|
oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
|
||||||
oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
|
oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
|
||||||
oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
|
oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
|
||||||
oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
|
oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
|
||||||
|
|
|
@ -164,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start,
|
||||||
unsigned long oprofile_get_cpu_buffer_size(void);
|
unsigned long oprofile_get_cpu_buffer_size(void);
|
||||||
void oprofile_cpu_buffer_inc_smpl_lost(void);
|
void oprofile_cpu_buffer_inc_smpl_lost(void);
|
||||||
|
|
||||||
|
/* cpu buffer functions */
|
||||||
|
|
||||||
|
struct op_sample;
|
||||||
|
|
||||||
|
struct op_entry {
|
||||||
|
struct ring_buffer_event *event;
|
||||||
|
struct op_sample *sample;
|
||||||
|
unsigned long irq_flags;
|
||||||
|
unsigned long size;
|
||||||
|
unsigned long *data;
|
||||||
|
};
|
||||||
|
|
||||||
|
void oprofile_write_reserve(struct op_entry *entry,
|
||||||
|
struct pt_regs * const regs,
|
||||||
|
unsigned long pc, int code, int size);
|
||||||
|
int oprofile_add_data(struct op_entry *entry, unsigned long val);
|
||||||
|
int oprofile_write_commit(struct op_entry *entry);
|
||||||
|
|
||||||
#endif /* OPROFILE_H */
|
#endif /* OPROFILE_H */
|
||||||
|
|
|
@ -168,7 +168,13 @@ rb_event_length(struct ring_buffer_event *event)
|
||||||
*/
|
*/
|
||||||
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
|
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
|
||||||
{
|
{
|
||||||
return rb_event_length(event);
|
unsigned length = rb_event_length(event);
|
||||||
|
if (event->type != RINGBUF_TYPE_DATA)
|
||||||
|
return length;
|
||||||
|
length -= RB_EVNT_HDR_SIZE;
|
||||||
|
if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
|
||||||
|
length -= sizeof(event->array[0]);
|
||||||
|
return length;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
|
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
|
||||||
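The oprofile side depends on this fix: op_cpu_buffer_read_entry() computes the number of attached data words as (ring_buffer_event_length(e) - sizeof(struct op_sample)) / sizeof(data[0]), which only works when the reported length excludes the ring buffer header. A hedged round-trip sketch follows; the function name is an assumption and the word count of 6 is arbitrary:

/*
 * Illustrative sketch only: after the fix, the length reported for a
 * reserved data event matches what the oprofile writer asked for, so the
 * reader recovers the same data-word count.
 */
static void example_length_round_trip(void)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 6);
	if (!sample)
		return;
	/*
	 * ring_buffer_event_length(entry.event) now reports
	 * sizeof(struct op_sample) + 6 * sizeof(unsigned long), so
	 * op_cpu_buffer_read_entry() will compute entry->size == 6.
	 */
	op_cpu_buffer_write_commit(&entry);
}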
|
|
||||||
|
|