KVM: x86/mmu: Replace UNMAPPED_GVA with INVALID_GPA for gva_to_gpa()

The result of gva_to_gpa() is a physical address, not a virtual
address, so it is odd that the UNMAPPED_GVA macro is used as the
failure result for a physical address. Replace UNMAPPED_GVA with
INVALID_GPA and drop the UNMAPPED_GVA macro.

No functional change intended.

Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/6104978956449467d3c68f1ad7f2c2f6d771d0ee.1656667239.git.houwenlong.hwl@antgroup.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Hou Wenlong 2022-07-01 17:24:13 +08:00, committed by Sean Christopherson
Parent: 156b9d76e8
Commit: 6e1d2a3f25
4 changed files with 13 additions and 14 deletions
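
For context, a minimal standalone sketch (userspace C, not kernel code) of the
two facts the patch relies on: the translator returns a gpa_t, and both
sentinel macros expand to the same all-ones value, so the rename is purely
cosmetic. The typedefs and gva_to_gpa_stub() are illustrative assumptions;
only the two #define lines come from the patch itself.

#include <assert.h>
#include <stdint.h>

/* Assumed to mirror the u64 typedefs in include/linux/kvm_types.h. */
typedef uint64_t gva_t;
typedef uint64_t gpa_t;

#define UNMAPPED_GVA (~(gpa_t)0) /* old sentinel, dropped by this patch */
#define INVALID_GPA  (~(gpa_t)0) /* pre-existing sentinel the callers switch to */

/* Hypothetical stand-in for gva_to_gpa(): the return type is gpa_t,
 * which is why a failed walk should be reported with a GPA-named
 * sentinel rather than a GVA-named one. */
static gpa_t gva_to_gpa_stub(gva_t gva)
{
	(void)gva; /* no page tables here; every walk "fails" */
	return INVALID_GPA;
}

int main(void)
{
	/* Both macros expand to the same all-ones value, so every
	 * "== UNMAPPED_GVA" check compiles identically after the rename. */
	assert(UNMAPPED_GVA == INVALID_GPA);
	assert(gva_to_gpa_stub(0x1000) == INVALID_GPA);
	return 0;
}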

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h

@@ -129,7 +129,6 @@
#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)
-#define UNMAPPED_GVA (~(gpa_t)0)
#define INVALID_GPA (~(gpa_t)0)
/* KVM Hugepage definitions for x86 */

--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h

@@ -378,7 +378,7 @@ retry_walk:
* information to fix the exit_qualification or exit_info_1
* fields.
*/
-if (unlikely(real_gpa == UNMAPPED_GVA))
+if (unlikely(real_gpa == INVALID_GPA))
return 0;
host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
@@ -431,7 +431,7 @@ retry_walk:
#endif
real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
-if (real_gpa == UNMAPPED_GVA)
+if (real_gpa == INVALID_GPA)
return 0;
walker->gfn = real_gpa >> PAGE_SHIFT;
@@ -962,7 +962,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
struct x86_exception *exception)
{
struct guest_walker walker;
-gpa_t gpa = UNMAPPED_GVA;
+gpa_t gpa = INVALID_GPA;
int r;
#ifndef CONFIG_X86_64

--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c

@@ -79,7 +79,7 @@ static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write,
else
*gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex);
-if (*gpa == UNMAPPED_GVA) {
+if (*gpa == INVALID_GPA) {
kvm_inject_emulated_page_fault(vcpu, &ex);
return -EFAULT;
}

--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c

@@ -849,7 +849,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
*/
real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
-if (real_gpa == UNMAPPED_GVA)
+if (real_gpa == INVALID_GPA)
return 0;
/* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
@@ -7072,7 +7072,7 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
-if (gpa == UNMAPPED_GVA)
+if (gpa == INVALID_GPA)
return X86EMUL_PROPAGATE_FAULT;
ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
offset, toread);
@@ -7103,7 +7103,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
/* Inline kvm_read_guest_virt_helper for speed. */
gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
exception);
-if (unlikely(gpa == UNMAPPED_GVA))
+if (unlikely(gpa == INVALID_GPA))
return X86EMUL_PROPAGATE_FAULT;
offset = addr & (PAGE_SIZE-1);
@@ -7173,7 +7173,7 @@ static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
-if (gpa == UNMAPPED_GVA)
+if (gpa == INVALID_GPA)
return X86EMUL_PROPAGATE_FAULT;
ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
if (ret < 0) {
@@ -7284,7 +7284,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
*gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
-if (*gpa == UNMAPPED_GVA)
+if (*gpa == INVALID_GPA)
return -1;
return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
@@ -7521,7 +7521,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
-if (gpa == UNMAPPED_GVA ||
+if (gpa == INVALID_GPA ||
(gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
goto emul_write;
@@ -8338,7 +8338,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
* If the mapping is invalid in guest, let cpu retry
* it to generate fault.
*/
-if (gpa == UNMAPPED_GVA)
+if (gpa == INVALID_GPA)
return true;
}
@@ -11378,7 +11378,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
tr->physical_address = gpa;
-tr->valid = gpa != UNMAPPED_GVA;
+tr->valid = gpa != INVALID_GPA;
tr->writeable = 1;
tr->usermode = 0;
@@ -12983,7 +12983,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
if (!(error_code & PFERR_PRESENT_MASK) ||
-    mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != UNMAPPED_GVA) {
+    mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
/*
* If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
* tables probably do not match the TLB. Just proceed