KVM: Reduce stack usage in kvm_pv_mmu_op()
kvm_pv_mmu_op() is on a hot path, so we can't use kmalloc() without risking a performance hit. Instead, stick the buffer it needs into the kvm_vcpu_arch structure; that structure is used very often, so the space is not really wasted. This also requires moving the buffer structure's definition into the arch-specific x86 KVM header.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
This commit is contained in:
Parent: b772ff362e
Commit: 6ad18fba05
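The pattern is easy to lift out of KVM. Below is a minimal, self-contained C sketch of the same idea, using hypothetical names (big_buffer, context, handle_op), not KVM's actual API: a large scratch buffer moves off the stack and into a long-lived per-context structure, so the hot path needs neither a big stack frame nor an allocation.

	/* Hypothetical illustration of the commit's pattern: the 512-byte
	 * scratch buffer lives in the per-context structure instead of on
	 * the stack of the hot-path function. */
	#include <stdio.h>
	#include <string.h>

	struct big_buffer {
		unsigned len;
		unsigned processed;
		char buf[512];
	};

	struct context {
		/* Embedded here so the hot path never allocates; safe as
		 * long as each context is driven by one thread at a time,
		 * as a KVM vcpu is. */
		struct big_buffer scratch;
	};

	static int handle_op(struct context *ctx, const char *src, unsigned bytes)
	{
		struct big_buffer *b = &ctx->scratch;	/* was: on-stack buffer */

		b->len = bytes < sizeof(b->buf) ? bytes : sizeof(b->buf);
		b->processed = 0;
		memcpy(b->buf, src, b->len);
		while (b->processed < b->len)	/* consume the buffered data */
			b->processed++;
		return 0;
	}

	int main(void)
	{
		struct context ctx;

		handle_op(&ctx, "payload", 7);
		printf("processed %u bytes\n", ctx.scratch.processed);
		return 0;
	}

The trade-off is the same as in the diff that follows: a little extra memory in every context object in exchange for a small, allocation-free hot path.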
arch/x86/kvm/mmu.c

@@ -135,13 +135,6 @@ module_param(dbg, bool, 0644);
 #define ACC_USER_MASK    PT_USER_MASK
 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
 
-struct kvm_pv_mmu_op_buffer {
-	void *ptr;
-	unsigned len;
-	unsigned processed;
-	char buf[512] __aligned(sizeof(long));
-};
-
 struct kvm_rmap_desc {
 	u64 *shadow_ptes[RMAP_EXT];
 	struct kvm_rmap_desc *more;
@@ -2292,18 +2285,18 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 		  gpa_t addr, unsigned long *ret)
 {
 	int r;
-	struct kvm_pv_mmu_op_buffer buffer;
+	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
 
-	buffer.ptr = buffer.buf;
-	buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
-	buffer.processed = 0;
+	buffer->ptr = buffer->buf;
+	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
+	buffer->processed = 0;
 
-	r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
+	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
 	if (r)
 		goto out;
 
-	while (buffer.len) {
-		r = kvm_pv_mmu_op_one(vcpu, &buffer);
+	while (buffer->len) {
+		r = kvm_pv_mmu_op_one(vcpu, buffer);
 		if (r < 0)
 			goto out;
 		if (r == 0)
@@ -2312,7 +2305,7 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 
 	r = 1;
 out:
-	*ret = buffer.processed;
+	*ret = buffer->processed;
 	return r;
 }
include/asm-x86/kvm_host.h

@@ -201,6 +201,13 @@ struct kvm_mmu_page {
 	};
 };
 
+struct kvm_pv_mmu_op_buffer {
+	void *ptr;
+	unsigned len;
+	unsigned processed;
+	char buf[512] __aligned(sizeof(long));
+};
+
 /*
  * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
  * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
@@ -248,6 +255,9 @@ struct kvm_vcpu_arch {
 	bool tpr_access_reporting;
 
 	struct kvm_mmu mmu;
+	/* only needed in kvm_pv_mmu_op() path, but it's hot so
+	 * put it here to avoid allocation */
+	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
 
 	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
 	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
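A note on the alignment annotation, in case it is unfamiliar: in the kernel headers, __aligned(x) is shorthand for the GCC alignment attribute, so the buffer declaration above is equivalent to:

	char buf[512] __attribute__((aligned(sizeof(long))));

Aligning the byte array to sizeof(long) presumably lets word-sized values be read out of the copied-in guest data without unaligned accesses; that rationale is an inference, not something stated in the commit.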