KVM: MMU: only update unsync page in invlpg path
Only unsync pages need to be updated at invlpg time, since other shadow pages are write-protected.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
Parent
e02aa901b1
Commit
f78978aa3a
|
@ -461,6 +461,7 @@ out_unlock:
|
|||
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
|
||||
{
|
||||
struct kvm_shadow_walk_iterator iterator;
|
||||
struct kvm_mmu_page *sp;
|
||||
gpa_t pte_gpa = -1;
|
||||
int level;
|
||||
u64 *sptep;
|
||||
|
@ -472,10 +473,13 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
|
|||
level = iterator.level;
|
||||
sptep = iterator.sptep;
|
||||
|
||||
sp = page_header(__pa(sptep));
|
||||
if (is_last_spte(*sptep, level)) {
|
||||
struct kvm_mmu_page *sp = page_header(__pa(sptep));
|
||||
int offset, shift;
|
||||
|
||||
if (!sp->unsync)
|
||||
break;
|
||||
|
||||
shift = PAGE_SHIFT -
|
||||
(PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
|
||||
offset = sp->role.quadrant << shift;
|
||||
|
@ -493,7 +497,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
|
|||
break;
|
||||
}
|
||||
|
||||
if (!is_shadow_present_pte(*sptep))
|
||||
if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in new issue