xen/PMU: Intercept PMU-related MSR and APIC accesses

Provide interfaces for recognizing accesses to PMU-related MSRs and to the
APIC LVTPC register, and process these accesses in the Xen PMU code.
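
For reference, the interception follows a "hook first, fall back to native"
pattern: the new pmu_msr_read()/pmu_msr_write() helpers get first claim on an
MSR, and only MSRs they do not recognize reach the native (trapping)
accessors. A condensed sketch of the read side, paraphrased from the
enlighten.c hunk below (the APICBASE special-casing of the real function is
trimmed here):

static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
	u64 val;

	/* PMU-related MSR: handled by the Xen PMU code. */
	if (pmu_msr_read(msr, &val, err))
		return val;

	/* Everything else keeps going through the native accessor. */
	return native_read_msr_safe(msr, err);
}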

(The interrupt handler performs XENPMU_flush right at the start since no PMU
emulation is available yet; emulation will be added in a later patch.)
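
For context, a condensed sketch of the resulting interrupt handler flow,
paraphrased from the pmu.c hunk below (error reporting trimmed):

irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
	struct pt_regs regs;
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data)
		return IRQ_NONE;

	/* No PMU emulation yet: ask Xen to flush its PMU state into the
	 * shared page unconditionally before handing the sample to perf. */
	if (HYPERVISOR_xenpmu_op(XENPMU_flush, NULL))
		return IRQ_NONE;

	xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
			 xenpmu_data->pmu.pmu_flags);

	return x86_pmu.handle_irq(&regs) ? IRQ_HANDLED : IRQ_NONE;
}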

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Boris Ostrovsky 2015-08-10 16:34:36 -04:00 committed by David Vrabel
Parent e27b72df01
Commit 6b08cd6328
5 changed files with 109 additions and 8 deletions

View file

@@ -7,6 +7,7 @@
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include "xen-ops.h"
#include "pmu.h"
#include "smp.h"
static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
@@ -72,8 +73,10 @@ static u32 xen_apic_read(u32 reg)
static void xen_apic_write(u32 reg, u32 val)
{
-	if (reg == APIC_LVTPC)
+	if (reg == APIC_LVTPC) {
+		(void)pmu_apic_update(reg);
+		return;
+	}
/* Warn to see if there's any stray references */
WARN(1,"register: %x, value: %x\n", reg, val);

View file

@@ -1031,6 +1031,9 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
u64 val;
if (pmu_msr_read(msr, &val, err))
return val;
val = native_read_msr_safe(msr, err);
switch (msr) {
case MSR_IA32_APICBASE:
@@ -1077,17 +1080,13 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
Xen console noise. */
default:
-		ret = native_write_msr_safe(msr, low, high);
+		if (!pmu_msr_write(msr, low, high, &ret))
+			ret = native_write_msr_safe(msr, low, high);
}
return ret;
}
-unsigned long long xen_read_pmc(int counter)
-{
-	return 0;
-}
void xen_setup_shared_info(void)
{
if (!xen_feature(XENFEAT_auto_translated_physmap)) {

View file

@@ -51,6 +51,8 @@ static __read_mostly int amd_num_counters;
/* Alias registers (0x4c1) for full-width writes to PMCs */
#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_PMC0))
#define INTEL_PMC_TYPE_SHIFT 30
static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;
@@ -167,6 +169,91 @@ static int is_intel_pmu_msr(u32 msr_index, int *type, int *index)
}
}
bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
if (is_amd_pmu_msr(msr)) {
*val = native_read_msr_safe(msr, err);
return true;
}
} else {
int type, index;
if (is_intel_pmu_msr(msr, &type, &index)) {
*val = native_read_msr_safe(msr, err);
return true;
}
}
return false;
}
bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
if (is_amd_pmu_msr(msr)) {
*err = native_write_msr_safe(msr, low, high);
return true;
}
} else {
int type, index;
if (is_intel_pmu_msr(msr, &type, &index)) {
*err = native_write_msr_safe(msr, low, high);
return true;
}
}
return false;
}
static unsigned long long xen_amd_read_pmc(int counter)
{
uint32_t msr;
int err;
msr = amd_counters_base + (counter * amd_msr_step);
return native_read_msr_safe(msr, &err);
}
static unsigned long long xen_intel_read_pmc(int counter)
{
int err;
uint32_t msr;
if (counter & (1<<INTEL_PMC_TYPE_SHIFT))
msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
else
msr = MSR_IA32_PERFCTR0 + counter;
return native_read_msr_safe(msr, &err);
}
unsigned long long xen_read_pmc(int counter)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
return xen_amd_read_pmc(counter);
else
return xen_intel_read_pmc(counter);
}
int pmu_apic_update(uint32_t val)
{
int ret;
struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
if (!xenpmu_data) {
pr_warn_once("%s: pmudata not initialized\n", __func__);
return -EINVAL;
}
xenpmu_data->pmu.l.lapic_lvtpc = val;
ret = HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, NULL);
return ret;
}
/* perf callbacks */
static int xen_is_in_guest(void)
{
@@ -239,7 +326,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
-	int ret = IRQ_NONE;
+	int err, ret = IRQ_NONE;
struct pt_regs regs;
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
@@ -248,6 +335,12 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
return ret;
}
err = HYPERVISOR_xenpmu_op(XENPMU_flush, NULL);
if (err) {
pr_warn_once("%s: failed hypercall, err: %d\n", __func__, err);
return ret;
}
xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
xenpmu_data->pmu.pmu_flags);
if (x86_pmu.handle_irq(&regs))

View file

@@ -7,5 +7,9 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
void xen_pmu_init(int cpu);
void xen_pmu_finish(int cpu);
bool is_xen_pmu(int cpu);
bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
int pmu_apic_update(uint32_t reg);
unsigned long long xen_read_pmc(int counter);
#endif /* __XEN_PMU_H */

View file

@@ -20,6 +20,8 @@
#define XENPMU_feature_set 3
#define XENPMU_init 4
#define XENPMU_finish 5
#define XENPMU_lvtpc_set 6
#define XENPMU_flush 7
/* ` } */