#ifdef CONFIG_CPU_SUP_INTEL

#define MAX_EXTRA_REGS 2

/*
 * Per register state.
 */
struct er_account {
	int			ref;		/* reference count */
	unsigned int		extra_reg;	/* extra MSR number */
	u64			extra_config;	/* extra MSR config */
};

/*
 * Per core state.
 * This is used to coordinate shared registers between HT threads.
 */
struct intel_percore {
	raw_spinlock_t		lock;		/* protect structure */
	struct er_account	regs[MAX_EXTRA_REGS];
	int			refcnt;		/* number of threads */
	unsigned		core_id;
};
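
/*
 * Illustrative sketch (not part of the driver): how two HT siblings
 * would coordinate an extra MSR through the shared intel_percore above.
 * The helper name and calling convention here are hypothetical; the real
 * logic lives in the per-core constraint code.
 */
#if 0
static int er_account_get(struct intel_percore *pc,
			  unsigned int msr, u64 config)
{
	struct er_account *era;
	int i, ret = -EBUSY;

	raw_spin_lock(&pc->lock);
	for (i = 0; i < MAX_EXTRA_REGS; i++) {
		era = &pc->regs[i];
		if (era->ref && era->extra_reg == msr &&
		    era->extra_config == config) {
			era->ref++;			/* same config: share it */
			ret = 0;
			break;
		}
		if (!era->ref) {
			era->extra_reg = msr;		/* free slot: claim it */
			era->extra_config = config;
			era->ref = 1;
			ret = 0;
			break;
		}
	}
	raw_spin_unlock(&pc->lock);
	return ret;				/* -EBUSY: conflicting config in use */
}
#endif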

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
	EVENT_EXTRA_END
};

static struct event_constraint intel_nehalem_percore_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xb7, 0),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
	INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_percore_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xb7, 0),
	INTEL_EVENT_CONSTRAINT(0xbb, 0),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
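
/*
 * For example (illustration only): the generic PERF_COUNT_HW_CPU_CYCLES
 * event resolves through the table above to the raw event select 0x3c:
 *
 *	u64 config = intel_pmu_event_map(PERF_COUNT_HW_CPU_CYCLES);
 *	// config == 0x003c (UnHalted Core Cycles)
 */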

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	/*
	 * TBD: Need Off-core Response Performance Monitoring support
	 */
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01bb,
		/* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * OFFCORE_RESPONSE MSR bits (subset), see IA32 SDM Vol 3 30.6.1.3
 */

#define DMND_DATA_RD	(1 << 0)
#define DMND_RFO	(1 << 1)
#define DMND_WB		(1 << 3)
#define PF_DATA_RD	(1 << 4)
#define PF_DATA_RFO	(1 << 5)
#define RESP_UNCORE_HIT	(1 << 8)
#define RESP_MISS	(0xf600)	/* non-uncore hit */

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT,
		[ C(RESULT_MISS)   ] = DMND_DATA_RD|RESP_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT,
		[ C(RESULT_MISS)   ] = DMND_RFO|DMND_WB|RESP_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT,
		[ C(RESULT_MISS)   ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS,
	},
 }
};
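
/*
 * Illustration (not part of the driver): the extra_config value picked
 * from the table above is what ends up in the OFFCORE_RSP MSR. E.g. an
 * LL OP_READ "access" counts demand data reads that hit the uncore:
 *
 *	u64 rsp = DMND_DATA_RD | RESP_UNCORE_HIT;	// == 0x0101
 *	wrmsrl(MSR_OFFCORE_RSP_0, rsp);			// pairs with event 0x01b7
 */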

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
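
/*
 * Worked example (illustration only): for fixed counter 1
 * (CPU_CLK_UNHALTED.CORE) counting ring 0 and ring 3 with PMI enabled,
 * the 4-bit field written into MSR_ARCH_PERFMON_FIXED_CTR_CTRL is:
 *
 *	bits = 0x8 | 0x2 | 0x1;		// PMI + ring-3 + ring-0 = 0xb
 *	bits <<= (1 * 4);		// field for counter 1
 *	// ctrl_val = (ctrl_val & ~(0xfULL << 4)) | bits;
 */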

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
		checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
|
|
|
|
|
|
|
|
static struct event_constraint *
|
perf, x86: Add PEBS infrastructure
This patch implements support for Intel Precise Event Based Sampling,
which is an alternative counter mode in which the counter triggers a
hardware assist to collect information on events. The hardware assist
takes a trap like snapshot of a subset of the machine registers.
This data is written to the Intel Debug-Store, which can be programmed
with a data threshold at which to raise a PMI.
With the PEBS hardware assist being trap like, the reported IP is always
one instruction after the actual instruction that triggered the event.
This implements a simple PEBS model that always takes a single PEBS event
at a time. This is done so that the interaction with the rest of the
system is as expected (freq adjust, period randomization, lbr,
callchains, etc.).
It adds an ABI element: perf_event_attr::precise, which indicates that we
wish to use this (constrained, but precise) mode.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <20100304140100.392111285@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2010-03-02 21:52:12 +03:00
|
|
|
intel_bts_constraints(struct perf_event *event)
|
2010-02-26 14:05:05 +03:00
|
|
|
{
|
perf, x86: Add PEBS infrastructure
This patch implements support for Intel Precise Event Based Sampling,
which is an alternative counter mode in which the counter triggers a
hardware assist to collect information on events. The hardware assist
takes a trap like snapshot of a subset of the machine registers.
This data is written to the Intel Debug-Store, which can be programmed
with a data threshold at which to raise a PMI.
With the PEBS hardware assist being trap like, the reported IP is always
one instruction after the actual instruction that triggered the event.
This implements a simple PEBS model that always takes a single PEBS event
at a time. This is done so that the interaction with the rest of the
system is as expected (freq adjust, period randomization, lbr,
callchains, etc.).
It adds an ABI element: perf_event_attr::precise, which indicates that we
wish to use this (constrained, but precise) mode.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <20100304140100.392111285@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2010-03-02 21:52:12 +03:00
|
|
|
struct hw_perf_event *hwc = &event->hw;
|
|
|
|
unsigned int hw_event, bts_event;
|
2010-02-26 14:05:05 +03:00
|
|
|
|
2011-04-26 15:24:33 +04:00
|
|
|
if (event->attr.freq)
|
|
|
|
return NULL;
|
|
|
|
|
2010-03-02 21:52:12 +03:00
|
|
|
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
|
|
|
|
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
|
2010-02-26 14:05:05 +03:00
|
|
|
|
2010-03-02 21:52:12 +03:00
|
|
|
if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
|
2010-02-26 14:05:05 +03:00
|
|
|
return &bts_constraint;
|
2010-03-02 21:52:12 +03:00
|
|
|
|
2010-02-26 14:05:05 +03:00
|
|
|
return NULL;
|
|
|
|
}
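/*
 * Illustrative sketch (not part of the original file): the check above
 * routes an event to BTS only when it is the branch-instructions event
 * sampling every single branch. A hypothetical userspace request that
 * would take this path:
 *
 *	struct perf_event_attr attr = {
 *		.type          = PERF_TYPE_HARDWARE,
 *		.config        = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
 *		.sample_period = 1,	(period 1 and !attr.freq select BTS)
 *	};
 */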
|
|
|
|
|
2011-03-03 05:34:47 +03:00
|
|
|
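/*
 * Added note: extra MSRs (e.g. OFFCORE_RESPONSE) are shared between HT
 * siblings, so the per-core er_account table below arbitrates them: an
 * event may piggyback on an allocated slot only when it programs the
 * same extra MSR with the same value; a conflicting value yields the
 * empty constraint, which keeps the event off the PMU.
 */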
static struct event_constraint *
|
|
|
|
intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
|
|
|
|
{
|
|
|
|
struct hw_perf_event *hwc = &event->hw;
|
|
|
|
unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
|
|
|
|
struct event_constraint *c;
|
|
|
|
struct intel_percore *pc;
|
|
|
|
struct er_account *era;
|
|
|
|
int i;
|
|
|
|
int free_slot;
|
|
|
|
int found;
|
|
|
|
|
|
|
|
if (!x86_pmu.percore_constraints || hwc->extra_alloc)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
for (c = x86_pmu.percore_constraints; c->cmask; c++) {
|
|
|
|
if (e != c->code)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate resource per core.
|
|
|
|
*/
|
|
|
|
pc = cpuc->per_core;
|
|
|
|
if (!pc)
|
|
|
|
break;
|
|
|
|
c = &emptyconstraint;
|
|
|
|
raw_spin_lock(&pc->lock);
|
|
|
|
free_slot = -1;
|
|
|
|
found = 0;
|
|
|
|
for (i = 0; i < MAX_EXTRA_REGS; i++) {
|
|
|
|
era = &pc->regs[i];
|
|
|
|
if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
|
|
|
|
/* Allow sharing same config */
|
|
|
|
if (hwc->extra_config == era->extra_config) {
|
|
|
|
era->ref++;
|
|
|
|
cpuc->percore_used = 1;
|
|
|
|
hwc->extra_alloc = 1;
|
|
|
|
c = NULL;
|
|
|
|
}
|
|
|
|
/* else conflict */
|
|
|
|
found = 1;
|
|
|
|
break;
|
|
|
|
} else if (era->ref == 0 && free_slot == -1)
|
|
|
|
free_slot = i;
|
|
|
|
}
|
|
|
|
if (!found && free_slot != -1) {
|
|
|
|
era = &pc->regs[free_slot];
|
|
|
|
era->ref = 1;
|
|
|
|
era->extra_reg = hwc->extra_reg;
|
|
|
|
era->extra_config = hwc->extra_config;
|
|
|
|
cpuc->percore_used = 1;
|
|
|
|
hwc->extra_alloc = 1;
|
|
|
|
c = NULL;
|
|
|
|
}
|
|
|
|
raw_spin_unlock(&pc->lock);
|
|
|
|
return c;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2010-02-26 14:05:05 +03:00
|
|
|
static struct event_constraint *
|
|
|
|
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
|
|
|
|
{
|
|
|
|
struct event_constraint *c;
|
|
|
|
|
2010-03-02 21:52:12 +03:00
|
|
|
c = intel_bts_constraints(event);
|
|
|
|
if (c)
|
|
|
|
return c;
|
|
|
|
|
|
|
|
c = intel_pebs_constraints(event);
|
2010-02-26 14:05:05 +03:00
|
|
|
if (c)
|
|
|
|
return c;
|
|
|
|
|
2011-03-03 05:34:47 +03:00
|
|
|
c = intel_percore_constraints(cpuc, event);
|
|
|
|
if (c)
|
|
|
|
return c;
|
|
|
|
|
2010-02-26 14:05:05 +03:00
|
|
|
return x86_get_event_constraints(cpuc, event);
|
|
|
|
}
|
|
|
|
|
2011-03-03 05:34:47 +03:00
|
|
|
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
|
|
|
|
struct perf_event *event)
|
|
|
|
{
|
|
|
|
struct extra_reg *er;
|
|
|
|
struct intel_percore *pc;
|
|
|
|
struct er_account *era;
|
|
|
|
struct hw_perf_event *hwc = &event->hw;
|
|
|
|
int i, allref;
|
|
|
|
|
|
|
|
if (!cpuc->percore_used)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (er = x86_pmu.extra_regs; er->msr; er++) {
|
|
|
|
if (er->event != (hwc->config & er->config_mask))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
pc = cpuc->per_core;
|
|
|
|
raw_spin_lock(&pc->lock);
|
|
|
|
for (i = 0; i < MAX_EXTRA_REGS; i++) {
|
|
|
|
era = &pc->regs[i];
|
|
|
|
if (era->ref > 0 &&
|
|
|
|
era->extra_config == hwc->extra_config &&
|
|
|
|
era->extra_reg == er->msr) {
|
|
|
|
era->ref--;
|
|
|
|
hwc->extra_alloc = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
allref = 0;
|
|
|
|
for (i = 0; i < MAX_EXTRA_REGS; i++)
|
|
|
|
allref += pc->regs[i].ref;
|
|
|
|
if (allref == 0)
|
|
|
|
cpuc->percore_used = 0;
|
|
|
|
raw_spin_unlock(&pc->lock);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-03-30 19:00:06 +04:00
|
|
|
static int intel_pmu_hw_config(struct perf_event *event)
|
|
|
|
{
|
|
|
|
int ret = x86_pmu_hw_config(event);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2010-12-14 23:26:40 +03:00
|
|
|
if (event->attr.precise_ip &&
|
|
|
|
(event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
|
|
|
|
/*
|
|
|
|
* Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
|
|
|
|
* (0x003c) so that we can use it with PEBS.
|
|
|
|
*
|
|
|
|
* The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
|
|
|
|
* PEBS capable. However, we can use INST_RETIRED.ANY_P
|
|
|
|
* (0x00c0), which is a PEBS capable event, to get the same
|
|
|
|
* count.
|
|
|
|
*
|
|
|
|
* INST_RETIRED.ANY_P counts the number of cycles that retire
|
|
|
|
* CNTMASK instructions. By setting CNTMASK to a value (16)
|
|
|
|
* larger than the maximum number of instructions that can be
|
|
|
|
* retired per cycle (4) and then inverting the condition, we
|
|
|
|
* count all cycles that retire 16 or fewer instructions, which
|
|
|
|
* is every cycle.
|
|
|
|
*
|
|
|
|
* Thereby we gain a PEBS capable cycle counter.
|
|
|
|
*/
|
|
|
|
u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
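/*
 * Added breakdown (commentary only): 0x108000c0 decodes as
 * (16 << 24) | (1 << 23) | 0x00c0, i.e. event 0xc0
 * (INST_RETIRED.ANY_P) with CMASK = 16 and the invert bit set,
 * matching the reasoning in the comment above.
 */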
|
|
|
|
|
|
|
|
alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
|
|
|
|
event->hw.config = alt_config;
|
|
|
|
}
|
|
|
|
|
2010-03-30 19:00:06 +04:00
|
|
|
if (event->attr.type != PERF_TYPE_RAW)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (x86_pmu.version < 3)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
|
|
|
|
return -EACCES;
|
|
|
|
|
|
|
|
event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
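/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * userspace request for precise cycles, which exercises the 0x003c
 * rewrite in intel_pmu_hw_config() above:
 *
 *	struct perf_event_attr attr = {
 *		.type       = PERF_TYPE_HARDWARE,
 *		.config     = PERF_COUNT_HW_CPU_CYCLES,
 *		.precise_ip = 1,
 *	};
 */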
|
|
|
|
|
2010-03-29 15:09:53 +04:00
|
|
|
static __initconst const struct x86_pmu core_pmu = {
|
2010-02-26 14:05:05 +03:00
|
|
|
.name = "core",
|
|
|
|
.handle_irq = x86_pmu_handle_irq,
|
|
|
|
.disable_all = x86_pmu_disable_all,
|
|
|
|
.enable_all = x86_pmu_enable_all,
|
|
|
|
.enable = x86_pmu_enable_event,
|
|
|
|
.disable = x86_pmu_disable_event,
|
2010-03-30 19:00:06 +04:00
|
|
|
.hw_config = x86_pmu_hw_config,
|
2010-03-11 19:54:39 +03:00
|
|
|
.schedule_events = x86_schedule_events,
|
2010-02-26 14:05:05 +03:00
|
|
|
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
|
|
|
|
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
|
|
|
|
.event_map = intel_pmu_event_map,
|
|
|
|
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
|
|
|
|
.apic = 1,
|
|
|
|
/*
|
|
|
|
* Intel PMCs cannot be accessed sanely above 32-bit width,
|
|
|
|
* so we install an artificial 1<<31 period regardless of
|
|
|
|
* the generic event period:
|
|
|
|
*/
|
|
|
|
.max_period = (1ULL << 31) - 1,
|
|
|
|
.get_event_constraints = intel_get_event_constraints,
|
2011-03-03 05:34:47 +03:00
|
|
|
.put_event_constraints = intel_put_event_constraints,
|
2010-02-26 14:05:05 +03:00
|
|
|
.event_constraints = intel_core_event_constraints,
|
|
|
|
};
|
|
|
|
|
2011-03-03 05:34:47 +03:00
|
|
|
static int intel_pmu_cpu_prepare(int cpu)
|
|
|
|
{
|
|
|
|
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
|
|
|
|
|
2011-03-03 05:34:50 +03:00
|
|
|
if (!cpu_has_ht_siblings())
|
|
|
|
return NOTIFY_OK;
|
|
|
|
|
2011-03-03 05:34:47 +03:00
|
|
|
cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
|
|
|
|
GFP_KERNEL, cpu_to_node(cpu));
|
|
|
|
if (!cpuc->per_core)
|
|
|
|
return NOTIFY_BAD;
|
|
|
|
|
|
|
|
raw_spin_lock_init(&cpuc->per_core->lock);
|
|
|
|
cpuc->per_core->core_id = -1;
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
|
|
|
|
2010-03-05 15:49:35 +03:00
|
|
|
static void intel_pmu_cpu_starting(int cpu)
|
|
|
|
{
|
2011-03-03 05:34:47 +03:00
|
|
|
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
|
|
|
|
int core_id = topology_core_id(cpu);
|
|
|
|
int i;
|
|
|
|
|
2011-03-03 05:34:50 +03:00
|
|
|
init_debug_store_on_cpu(cpu);
|
|
|
|
/*
|
|
|
|
* Deal with CPUs that don't clear their LBRs on power-up.
|
|
|
|
*/
|
|
|
|
intel_pmu_lbr_reset();
|
|
|
|
|
|
|
|
if (!cpu_has_ht_siblings())
|
|
|
|
return;
|
|
|
|
|
2011-03-03 05:34:47 +03:00
|
|
|
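/*
 * If a sibling thread has already published the shared per-core
 * structure for this core, adopt it and free our preallocated copy.
 */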
for_each_cpu(i, topology_thread_cpumask(cpu)) {
|
|
|
|
struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;
|
|
|
|
|
|
|
|
if (pc && pc->core_id == core_id) {
|
|
|
|
kfree(cpuc->per_core);
|
|
|
|
cpuc->per_core = pc;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cpuc->per_core->core_id = core_id;
|
|
|
|
cpuc->per_core->refcnt++;
|
2010-03-05 15:49:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void intel_pmu_cpu_dying(int cpu)
|
|
|
|
{
|
2011-03-03 05:34:47 +03:00
|
|
|
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
|
|
|
|
struct intel_percore *pc = cpuc->per_core;
|
|
|
|
|
|
|
|
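/*
 * core_id == -1 means cpu_starting never linked this structure into
 * a core, so it is private and can be freed unconditionally;
 * otherwise free it only when the last sibling drops its reference.
 */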
if (pc) {
|
|
|
|
if (pc->core_id == -1 || --pc->refcnt == 0)
|
|
|
|
kfree(pc);
|
|
|
|
cpuc->per_core = NULL;
|
|
|
|
}
|
|
|
|
|
2010-03-05 15:49:35 +03:00
|
|
|
fini_debug_store_on_cpu(cpu);
|
|
|
|
}
|
|
|
|
|
2010-03-29 15:09:53 +04:00
|
|
|
static __initconst const struct x86_pmu intel_pmu = {
|
2010-02-26 14:05:05 +03:00
|
|
|
.name = "Intel",
|
|
|
|
.handle_irq = intel_pmu_handle_irq,
|
|
|
|
.disable_all = intel_pmu_disable_all,
|
|
|
|
.enable_all = intel_pmu_enable_all,
|
|
|
|
.enable = intel_pmu_enable_event,
|
|
|
|
.disable = intel_pmu_disable_event,
|
2010-03-30 19:00:06 +04:00
|
|
|
.hw_config = intel_pmu_hw_config,
|
2010-03-11 19:54:39 +03:00
|
|
|
.schedule_events = x86_schedule_events,
|
2010-02-26 14:05:05 +03:00
|
|
|
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
|
|
|
|
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
|
|
|
|
.event_map = intel_pmu_event_map,
|
|
|
|
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
|
|
|
|
.apic = 1,
|
|
|
|
/*
|
|
|
|
* Intel PMCs cannot be accessed sanely above 32-bit width,
|
|
|
|
* so we install an artificial 1<<31 period regardless of
|
|
|
|
* the generic event period:
|
|
|
|
*/
|
|
|
|
.max_period = (1ULL << 31) - 1,
|
2010-03-05 15:01:18 +03:00
|
|
|
.get_event_constraints = intel_get_event_constraints,
|
2011-03-03 05:34:47 +03:00
|
|
|
.put_event_constraints = intel_put_event_constraints,
|
2010-03-05 15:01:18 +03:00
|
|
|
|
2011-03-03 05:34:47 +03:00
|
|
|
.cpu_prepare = intel_pmu_cpu_prepare,
|
2010-03-05 15:49:35 +03:00
|
|
|
.cpu_starting = intel_pmu_cpu_starting,
|
|
|
|
.cpu_dying = intel_pmu_cpu_dying,
|
2010-02-26 14:05:05 +03:00
|
|
|
};
|
|
|
|
|
2010-03-04 23:49:01 +03:00
|
|
|
static void intel_clovertown_quirks(void)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* PEBS is unreliable due to:
|
|
|
|
*
|
|
|
|
* AJ67 - PEBS may experience CPL leaks
|
|
|
|
* AJ68 - PEBS PMI may be delayed by one event
|
|
|
|
* AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] is set
|
|
|
|
* AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
|
|
|
|
*
|
|
|
|
* AJ67 could be worked around by restricting the OS/USR flags.
|
|
|
|
* AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
|
|
|
|
*
|
|
|
|
* AJ106 could possibly be worked around by not allowing LBR
|
|
|
|
* usage from PEBS, including the fixup.
|
|
|
|
* AJ68 could possibly be worked around by always programming
|
2011-04-27 13:51:41 +04:00
|
|
|
* a pebs_event_reset[0] value and coping with the lost events.
|
2010-03-04 23:49:01 +03:00
|
|
|
*
|
|
|
|
* But taken together it might just make sense to not enable PEBS on
|
|
|
|
* these chips.
|
|
|
|
*/
|
|
|
|
printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
|
|
|
|
x86_pmu.pebs = 0;
|
|
|
|
x86_pmu.pebs_constraints = NULL;
|
|
|
|
}
|
|
|
|
|
2010-02-26 14:05:05 +03:00
|
|
|
static __init int intel_pmu_init(void)
|
|
|
|
{
|
|
|
|
union cpuid10_edx edx;
|
|
|
|
union cpuid10_eax eax;
|
|
|
|
unsigned int unused;
|
|
|
|
unsigned int ebx;
|
|
|
|
int version;
|
|
|
|
|
|
|
|
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
|
2010-03-11 19:54:39 +03:00
|
|
|
switch (boot_cpu_data.x86) {
|
|
|
|
case 0x6:
|
|
|
|
return p6_pmu_init();
|
|
|
|
case 0xf:
|
|
|
|
return p4_pmu_init();
|
|
|
|
}
|
2010-02-26 14:05:05 +03:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check whether the Architectural PerfMon supports the
|
|
|
|
* Branch Misses Retired hw_event.
|
|
|
|
*/
|
|
|
|
cpuid(10, &eax.full, &ebx, &unused, &edx.full);
|
|
|
|
if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
version = eax.split.version_id;
|
|
|
|
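/*
 * Architectural perfmon v1 lacks the global control/status MSRs and
 * fixed counters that intel_pmu relies on, so fall back to the core
 * PMU driver there.
 */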
if (version < 2)
|
|
|
|
x86_pmu = core_pmu;
|
|
|
|
else
|
|
|
|
x86_pmu = intel_pmu;
|
|
|
|
|
|
|
|
x86_pmu.version = version;
|
2010-03-29 20:36:50 +04:00
|
|
|
x86_pmu.num_counters = eax.split.num_counters;
|
|
|
|
x86_pmu.cntval_bits = eax.split.bit_width;
|
|
|
|
x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
|
2010-02-26 14:05:05 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Quirk: v2 perfmon does not report fixed-purpose events, so
|
|
|
|
* assume at least 3 events:
|
|
|
|
*/
|
|
|
|
if (version > 1)
|
2010-03-29 20:36:50 +04:00
|
|
|
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
|
2010-02-26 14:05:05 +03:00
|
|
|
|
2010-03-03 19:07:40 +03:00
|
|
|
/*
|
|
|
|
* v2 and above have a perf capabilities MSR
|
|
|
|
*/
|
|
|
|
if (version > 1) {
|
|
|
|
u64 capabilities;
|
|
|
|
|
|
|
|
rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
|
|
|
|
x86_pmu.intel_cap.capabilities = capabilities;
|
|
|
|
}
|
|
|
|
|
2010-03-02 21:52:12 +03:00
|
|
|
intel_ds_init();
|
|
|
|
|
2010-02-26 14:05:05 +03:00
|
|
|
/*
|
|
|
|
* Install the hw-cache-events table:
|
|
|
|
*/
|
|
|
|
switch (boot_cpu_data.x86_model) {
|
|
|
|
case 14: /* 65 nm core solo/duo, "Yonah" */
|
|
|
|
pr_cont("Core events, ");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
|
2010-03-04 23:49:01 +03:00
|
|
|
x86_pmu.quirks = intel_clovertown_quirks;
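/* fall through */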
|
2010-02-26 14:05:05 +03:00
|
|
|
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
|
|
|
|
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
|
|
|
|
case 29: /* six-core 45 nm xeon "Dunnington" */
|
|
|
|
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
|
|
|
|
sizeof(hw_cache_event_ids));
|
|
|
|
|
2010-03-03 14:02:30 +03:00
|
|
|
intel_pmu_lbr_init_core();
|
|
|
|
|
2010-02-26 14:05:05 +03:00
|
|
|
x86_pmu.event_constraints = intel_core2_event_constraints;
|
2011-03-02 18:05:01 +03:00
|
|
|
x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
|
2010-02-26 14:05:05 +03:00
|
|
|
pr_cont("Core2 events, ");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 26: /* 45 nm nehalem, "Bloomfield" */
|
|
|
|
case 30: /* 45 nm nehalem, "Lynnfield" */
|
2010-04-06 18:01:19 +04:00
|
|
|
case 46: /* 45 nm nehalem-ex, "Beckton" */
|
2010-02-26 14:05:05 +03:00
|
|
|
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
|
|
|
|
sizeof(hw_cache_event_ids));
|
2011-03-03 05:34:48 +03:00
|
|
|
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
|
|
|
|
sizeof(hw_cache_extra_regs));
|
2010-02-26 14:05:05 +03:00
|
|
|
|
2010-03-03 14:02:30 +03:00
|
|
|
intel_pmu_lbr_init_nhm();
|
|
|
|
|
2010-02-26 14:05:05 +03:00
|
|
|
x86_pmu.event_constraints = intel_nehalem_event_constraints;
|
2011-03-02 18:05:01 +03:00
|
|
|
x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
|
2011-03-03 05:34:47 +03:00
|
|
|
x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
|
2010-03-26 16:08:44 +03:00
|
|
|
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
|
2011-03-03 05:34:47 +03:00
|
|
|
x86_pmu.extra_regs = intel_nehalem_extra_regs;
|
2011-04-27 13:51:41 +04:00
|
|
|
|
2011-04-24 10:18:31 +04:00
|
|
|
/* Install the stalled-cycles event: 0xff: All reasons, 0xa2: Resource stalls */
|
|
|
|
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES] = 0xffa2;
|
|
|
|
|
2011-04-27 13:51:41 +04:00
|
|
|
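/* CPUID.0AH:EBX bit 6 flags the architectural branch-misses event as unavailable */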
if (ebx & 0x40) {
|
|
|
|
/*
|
|
|
|
* Erratum AAJ80 detected, we work it around by using
|
|
|
|
* the BR_MISP_EXEC.ANY event. This will over-count
|
|
|
|
* branch-misses, but it's still much better than the
|
|
|
|
* architectural event which is often completely bogus:
|
|
|
|
*/
|
|
|
|
intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
|
|
|
|
|
|
|
|
pr_cont("erratum AAJ80 worked around, ");
|
|
|
|
}
|
2010-03-26 16:08:44 +03:00
|
|
|
pr_cont("Nehalem events, ");
|
2010-02-26 14:05:05 +03:00
|
|
|
break;
|
2010-03-03 14:02:30 +03:00
|
|
|
|
2010-02-01 17:36:30 +03:00
|
|
|
case 28: /* Atom */
|
2010-02-26 14:05:05 +03:00
|
|
|
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
|
|
|
|
sizeof(hw_cache_event_ids));
|
|
|
|
|
2010-03-03 14:02:30 +03:00
|
|
|
intel_pmu_lbr_init_atom();
|
|
|
|
|
2010-02-26 14:05:05 +03:00
|
|
|
x86_pmu.event_constraints = intel_gen_event_constraints;
|
2011-03-02 18:05:01 +03:00
|
|
|
x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
|
2010-02-26 14:05:05 +03:00
|
|
|
pr_cont("Atom events, ");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 37: /* 32 nm nehalem, "Clarkdale" */
|
|
|
|
case 44: /* 32 nm nehalem, "Gulftown" */
|
2011-04-22 03:48:35 +04:00
|
|
|
case 47: /* 32 nm Xeon E7 */
|
2010-02-26 14:05:05 +03:00
|
|
|
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
|
|
|
|
sizeof(hw_cache_event_ids));
|
2011-03-03 05:34:48 +03:00
|
|
|
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
|
|
|
|
sizeof(hw_cache_extra_regs));
|
2010-02-26 14:05:05 +03:00
|
|
|
|
2010-03-03 14:02:30 +03:00
|
|
|
intel_pmu_lbr_init_nhm();
|
|
|
|
|
2010-02-26 14:05:05 +03:00
|
|
|
x86_pmu.event_constraints = intel_westmere_event_constraints;
|
2011-03-03 05:34:47 +03:00
|
|
|
x86_pmu.percore_constraints = intel_westmere_percore_constraints;
|
2010-03-29 18:37:17 +04:00
|
|
|
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
|
2011-03-02 18:05:01 +03:00
|
|
|
x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
|
2011-03-03 05:34:47 +03:00
|
|
|
x86_pmu.extra_regs = intel_westmere_extra_regs;
|
2010-02-26 14:05:05 +03:00
|
|
|
pr_cont("Westmere events, ");
|
|
|
|
break;
|
2010-02-01 17:36:30 +03:00
|
|
|
|
2011-03-02 16:27:04 +03:00
|
|
|
case 42: /* SandyBridge */
|
|
|
|
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
|
|
|
|
sizeof(hw_cache_event_ids));
|
|
|
|
|
|
|
|
intel_pmu_lbr_init_nhm();
|
|
|
|
|
|
|
|
x86_pmu.event_constraints = intel_snb_event_constraints;
|
|
|
|
x86_pmu.pebs_constraints = intel_snb_pebs_events;
|
|
|
|
pr_cont("SandyBridge events, ");
|
|
|
|
break;
|
|
|
|
|
2010-02-26 14:05:05 +03:00
|
|
|
default:
|
|
|
|
/*
|
|
|
|
* default constraints for v2 and up
|
|
|
|
*/
|
|
|
|
x86_pmu.event_constraints = intel_gen_event_constraints;
|
|
|
|
pr_cont("generic architected perfmon, ");
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#else /* CONFIG_CPU_SUP_INTEL */
|
|
|
|
|
|
|
|
static int intel_pmu_init(void)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_CPU_SUP_INTEL */
|