x86/oprofile: replace CTR_OVERFLOWED macros
The patch replaces all CTR_OVERFLOWED macros. 64-bit MSR functions and 64-bit counter values are used now. This will make it easier to later extend the models to counters wider than 32 bits.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Parent: 3370d35856
Commit: 42399adb23
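The core of the change is the overflow test itself: instead of reading only the low 32 bits of a counter MSR and checking bit 31 through a CTR_OVERFLOWED macro, each model now reads the full 64-bit value (rdmsrl) and tests the overflow bit inline, so the bit position can later follow a per-model counter width. Below is a minimal stand-alone C sketch of the two checks; it only illustrates the pattern, the MSR value is simulated rather than read from hardware.

#include <stdio.h>
#include <stdint.h>

/* Old style: only the low 32 bits are examined; the counter has
 * "overflowed" (counted up through 0) when bit 31 is clear. */
#define CTR_OVERFLOWED(n)	(!((n) & (1U << 31)))

/* New style: test the same bit in the full 64-bit value; the bit
 * position follows a (possibly model-specific) counter width. */
static int counter_width = 32;

static int ctr_overflowed(uint64_t val)
{
	/* bit is clear if overflowed */
	return !(val & (1ULL << (counter_width - 1)));
}

int main(void)
{
	/* Simulated 64-bit counter value as rdmsrl() would return it;
	 * a real driver reads this from the counter MSR. */
	uint64_t val = 0x0000000000001234ULL;	/* bit 31 clear -> overflowed */

	printf("32-bit check: %d\n", CTR_OVERFLOWED((uint32_t)val));
	printf("64-bit check: %d\n", ctr_overflowed(val));
	return 0;
}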
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -26,11 +26,10 @@
 #define NUM_COUNTERS 4
 #define NUM_CONTROLS 4
 #define OP_EVENT_MASK 0x0FFF
+#define OP_CTR_OVERFLOW (1ULL<<31)
 
 #define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
 
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
-
 static unsigned long reset_value[NUM_COUNTERS];
 
 #ifdef CONFIG_OPROFILE_IBS
@@ -241,18 +240,19 @@ static inline void op_amd_stop_ibs(void) { }
 static int op_amd_check_ctrs(struct pt_regs * const regs,
 			     struct op_msrs const * const msrs)
 {
-	unsigned int low, high;
+	u64 val;
 	int i;
 
 	for (i = 0 ; i < NUM_COUNTERS; ++i) {
 		if (!reset_value[i])
 			continue;
-		rdmsr(msrs->counters[i].addr, low, high);
-		if (CTR_OVERFLOWED(low)) {
-			oprofile_add_sample(regs, i);
-			wrmsr(msrs->counters[i].addr, -(unsigned int)reset_value[i], -1);
-		}
+		rdmsrl(msrs->counters[i].addr, val);
+		/* bit is clear if overflowed: */
+		if (val & OP_CTR_OVERFLOW)
+			continue;
+		oprofile_add_sample(regs, i);
+		wrmsr(msrs->counters[i].addr, -(unsigned int)reset_value[i], -1);
 	}
 
 	op_amd_handle_ibs(regs, msrs);
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -32,6 +32,8 @@
 #define NUM_CCCRS_HT2 9
 #define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2)
 
+#define OP_CTR_OVERFLOW (1ULL<<31)
+
 static unsigned int num_counters = NUM_COUNTERS_NON_HT;
 static unsigned int num_controls = NUM_CONTROLS_NON_HT;
 
@@ -362,8 +364,6 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
 #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
 #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
 
-#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
-
 
 /* this assigns a "stagger" to the current CPU, which is used throughout
    the code in this module as an extra array offset, to select the "even"
@@ -622,7 +622,7 @@ static int p4_check_ctrs(struct pt_regs * const regs,
 
 		rdmsr(p4_counters[real].cccr_address, low, high);
 		rdmsr(p4_counters[real].counter_address, ctr, high);
-		if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
+		if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) {
 			oprofile_add_sample(regs, i);
 			wrmsr(p4_counters[real].counter_address,
 			      -(u32)reset_value[i], -1);
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -26,8 +26,6 @@
 static int num_counters = 2;
 static int counter_width = 32;
 
-#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
-
 #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
 
 static u64 *reset_value;
@@ -124,11 +122,11 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
 		if (!reset_value[i])
 			continue;
 		rdmsrl(msrs->counters[i].addr, val);
-		if (CTR_OVERFLOWED(val)) {
-			oprofile_add_sample(regs, i);
-			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
-		}
+		if (val & (1ULL << (counter_width - 1)))
+			continue;
+		oprofile_add_sample(regs, i);
+		wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 	}
 
 	/* Only P6 based Pentium M need to re-unmask the apic vector but it
 	 * doesn't hurt other P6 variant */