KVM: PPC: Book3S HV: Move kvmppc_svm_page_out up

kvmppc_svm_page_out() will need to be called by kvmppc_uvmem_drop_pages(),
so move it up earlier in this file.

Furthermore, it will be useful to call this function while already
holding kvm->arch.uvmem_lock, so prefix the original function with __,
remove the locking from it, and introduce a wrapper which calls that
function with the lock held.

There is no functional change.
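For illustration only, here is a sketch of how a caller that already holds
kvm->arch.uvmem_lock (such as the planned kvmppc_uvmem_drop_pages() change,
which is not part of this patch) would use the unlocked variant; the function
name and parameter list below are hypothetical:

	/*
	 * Hypothetical sketch, not part of this patch: a caller that
	 * already holds kvm->arch.uvmem_lock must call the unlocked
	 * __kvmppc_svm_page_out() directly; calling the
	 * kvmppc_svm_page_out() wrapper here would self-deadlock on
	 * the mutex.
	 */
	static void drop_pages_sketch(struct kvm *kvm,
			struct vm_area_struct *vma,
			unsigned long start, unsigned long end,
			unsigned long page_shift, unsigned long gpa)
	{
		mutex_lock(&kvm->arch.uvmem_lock);
		/* ... per-GFN bookkeeping elided ... */
		__kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
		mutex_unlock(&kvm->arch.uvmem_lock);
	}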

Reviewed-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Laurent Dufour <ldufour@linux.ibm.com>
Signed-off-by: Ram Pai <linuxram@us.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Laurent Dufour 2020-07-27 12:24:28 -07:00 committed by Paul Mackerras
Parent a2ce720038
Commit f1b87ea878
1 changed file with 90 additions and 76 deletions


@@ -496,6 +496,96 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
	return ret;
}

/*
 * Provision a new page on HV side and copy over the contents
 * from secure memory using UV_PAGE_OUT uvcall.
 * Caller must hold kvm->arch.uvmem_lock.
 */
static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *dpage, *spage;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	int ret = U_SUCCESS;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.src_owner = &kvmppc_uvmem_pgmap;

	/* The requested page is already paged-out, nothing to do */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
		return ret;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return -1;

	spage = migrate_pfn_to_page(*mig.src);
	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	if (!is_zone_device_page(spage))
		goto out_finalize;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	lock_page(dpage);
	pvt = spage->zone_device_data;
	pfn = page_to_pfn(dpage);

	/*
	 * This function is used in two cases:
	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
	 * - When a secure page is converted to shared page, we *get*
	 *   the page to essentially unmap the device page. In this
	 *   case we skip page-out.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS)
		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
	else {
		unlock_page(dpage);
		__free_page(dpage);
		goto out_finalize;
	}

	migrate_vma_pages(&mig);

out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	int ret;

	mutex_lock(&kvm->arch.uvmem_lock);
	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return ret;
}

/*
 * Drop device pages that we maintain for the secure guest
 *
@@ -866,82 +956,6 @@ out:
	return ret;
}

/*
 * Provision a new page on HV side and copy over the contents
 * from secure memory using UV_PAGE_OUT uvcall.
 */
static int kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *dpage, *spage;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	int ret = U_SUCCESS;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.src_owner = &kvmppc_uvmem_pgmap;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* The requested page is already paged-out, nothing to do */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
		goto out;

	ret = migrate_vma_setup(&mig);
	if (ret)
		goto out;

	spage = migrate_pfn_to_page(*mig.src);
	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	if (!is_zone_device_page(spage))
		goto out_finalize;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	lock_page(dpage);
	pvt = spage->zone_device_data;
	pfn = page_to_pfn(dpage);

	/*
	 * This function is used in two cases:
	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
	 * - When a secure page is converted to shared page, we *get*
	 *   the page to essentially unmap the device page. In this
	 *   case we skip page-out.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS)
		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
	else {
		unlock_page(dpage);
		__free_page(dpage);
		goto out_finalize;
	}

	migrate_vma_pages(&mig);

out_finalize:
	migrate_vma_finalize(&mig);
out:
	mutex_unlock(&kvm->arch.uvmem_lock);
	return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that