KVM: introduce kvm_for_each_memslot macro
Introduce kvm_for_each_memslot to walk all valid memslots.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent: be593d6286
Commit: be6ba0f096
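To make the new iteration pattern easy to see in isolation, here is a small standalone sketch in plain userspace C. The two structures are cut down to the fields this patch actually touches (base_gfn, npages, nmemslots, memslots[]), the array size and the sample values are made up, and only the macro itself is copied verbatim from the header hunk further down; nothing here is kernel code.

#include <stdio.h>

/* Toy stand-ins for the kernel structures; only the fields used by the
 * patch are kept, and the fixed array size is arbitrary. */
struct kvm_memory_slot {
        unsigned long base_gfn;
        unsigned long npages;
};

struct kvm_memslots {
        int nmemslots;
        struct kvm_memory_slot memslots[8];
};

/* The macro exactly as the patch adds it: walk every valid slot by element
 * pointer, from &slots->memslots[0] up to (but excluding)
 * slots->memslots + nmemslots. */
#define kvm_for_each_memslot(memslot, slots)                            \
        for (memslot = &slots->memslots[0];                             \
              memslot < slots->memslots + (slots)->nmemslots; memslot++)

int main(void)
{
        struct kvm_memslots s = {
                .nmemslots = 2,
                .memslots = {
                        { .base_gfn = 0x000, .npages = 256 },
                        { .base_gfn = 0x100, .npages = 512 },
                },
        };
        struct kvm_memslots *slots = &s;
        struct kvm_memory_slot *memslot;
        unsigned long nr_pages = 0;

        /* Same shape as the converted call sites: no index variable, and
         * the body runs once per valid slot. */
        kvm_for_each_memslot(memslot, slots)
                nr_pages += memslot->npages;

        printf("total guest pages: %lu\n", nr_pages);   /* prints 768 */
        return 0;
}

Since the macro expands to an ordinary for statement, braces, single-statement bodies and break/continue all behave exactly as they did in the open-coded loops being removed below.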
@@ -1366,14 +1366,12 @@ static void kvm_release_vm_pages(struct kvm *kvm)
 {
         struct kvm_memslots *slots;
         struct kvm_memory_slot *memslot;
-        int i, j;
+        int j;
         unsigned long base_gfn;
 
         slots = kvm_memslots(kvm);
-        for (i = 0; i < slots->nmemslots; i++) {
-                memslot = &slots->memslots[i];
+        kvm_for_each_memslot(memslot, slots) {
                 base_gfn = memslot->base_gfn;
-
                 for (j = 0; j < memslot->npages; j++) {
                         if (memslot->rmap[j])
                                 put_page((struct page *)memslot->rmap[j]);
@@ -1128,15 +1128,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                           int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                          unsigned long data))
 {
-        int i, j;
+        int j;
         int ret;
         int retval = 0;
         struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
 
         slots = kvm_memslots(kvm);
 
-        for (i = 0; i < slots->nmemslots; i++) {
-                struct kvm_memory_slot *memslot = &slots->memslots[i];
+        kvm_for_each_memslot(memslot, slots) {
                 unsigned long start = memslot->userspace_addr;
                 unsigned long end;
 
@@ -3985,15 +3985,15 @@ nomem:
  */
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 {
-        int i;
         unsigned int nr_mmu_pages;
         unsigned int nr_pages = 0;
         struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
 
         slots = kvm_memslots(kvm);
 
-        for (i = 0; i < slots->nmemslots; i++)
-                nr_pages += slots->memslots[i].npages;
+        kvm_for_each_memslot(memslot, slots)
+                nr_pages += memslot->npages;
 
         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
         nr_mmu_pages = max(nr_mmu_pages,
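The arithmetic in this hunk is untouched by the patch, but worth spelling out: nr_pages is the sum of npages over all valid slots, and the MMU page budget is a permille fraction of that total, clamped from below by the max() call that the hunk truncates. The sketch below mirrors that calculation under two assumptions not visible in this diff: KVM_PERMILLE_MMU_PAGES is taken to be 20 and the clamp floor to be a 64-page KVM_MIN_ALLOC_MMU_PAGES, the x86 values of this era; check the x86 headers for the authoritative numbers.

#include <stdio.h>

/* Assumed values for illustration only; the real constants are
 * KVM_PERMILLE_MMU_PAGES and KVM_MIN_ALLOC_MMU_PAGES in the x86 KVM
 * headers, not part of this diff. */
#define PERMILLE_MMU_PAGES      20
#define MIN_ALLOC_MMU_PAGES     64

/* Mirrors kvm_mmu_calculate_mmu_pages() above: take a permille share of
 * the guest's total pages and never go below the floor. */
static unsigned int calc_mmu_pages(unsigned long nr_pages)
{
        unsigned int nr_mmu_pages = nr_pages * PERMILLE_MMU_PAGES / 1000;

        return nr_mmu_pages < MIN_ALLOC_MMU_PAGES ?
               MIN_ALLOC_MMU_PAGES : nr_mmu_pages;
}

int main(void)
{
        /* 1 GiB of guest memory is 262144 4 KiB pages -> 5242 MMU pages;
         * an 8 MiB guest (2048 pages) computes 40 and is raised to 64. */
        printf("%u %u\n", calc_mmu_pages(262144), calc_mmu_pages(2048));
        return 0;
}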
@@ -308,6 +308,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
              (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
              idx++)
 
+#define kvm_for_each_memslot(memslot, slots)                            \
+        for (memslot = &slots->memslots[0];                             \
+              memslot < slots->memslots + (slots)->nmemslots; memslot++)
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
@@ -134,14 +134,15 @@ unmap_pages:
 
 static int kvm_iommu_map_memslots(struct kvm *kvm)
 {
-        int i, idx, r = 0;
+        int idx, r = 0;
         struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
 
         idx = srcu_read_lock(&kvm->srcu);
         slots = kvm_memslots(kvm);
 
-        for (i = 0; i < slots->nmemslots; i++) {
-                r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
+        kvm_for_each_memslot(memslot, slots) {
+                r = kvm_iommu_map_pages(kvm, memslot);
                 if (r)
                         break;
         }
@@ -311,16 +312,16 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
-        int i, idx;
+        int idx;
         struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
 
         idx = srcu_read_lock(&kvm->srcu);
         slots = kvm_memslots(kvm);
 
-        for (i = 0; i < slots->nmemslots; i++) {
-                kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
-                                    slots->memslots[i].npages);
-        }
+        kvm_for_each_memslot(memslot, slots)
+                kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+
         srcu_read_unlock(&kvm->srcu, idx);
 
         return 0;
@@ -547,11 +547,11 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 
 void kvm_free_physmem(struct kvm *kvm)
 {
-        int i;
         struct kvm_memslots *slots = kvm->memslots;
+        struct kvm_memory_slot *memslot;
 
-        for (i = 0; i < slots->nmemslots; ++i)
-                kvm_free_physmem_slot(&slots->memslots[i], NULL);
+        kvm_for_each_memslot(memslot, slots)
+                kvm_free_physmem_slot(memslot, NULL);
 
         kfree(kvm->memslots);
 }
@@ -975,15 +975,13 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
                                                 gfn_t gfn)
 {
-        int i;
+        struct kvm_memory_slot *memslot;
 
-        for (i = 0; i < slots->nmemslots; ++i) {
-                struct kvm_memory_slot *memslot = &slots->memslots[i];
-
+        kvm_for_each_memslot(memslot, slots)
                 if (gfn >= memslot->base_gfn
                     && gfn < memslot->base_gfn + memslot->npages)
                         return memslot;
-        }
 
         return NULL;
 }
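The lookup itself is also unchanged: a gfn belongs to a slot when base_gfn <= gfn < base_gfn + npages, and the macro merely feeds each valid slot to that test. A minimal standalone check of the half-open range, with a made-up struct name and sample numbers (gfn_t is represented as unsigned long here):

#include <stdio.h>

/* The hit test from __gfn_to_memslot() above, applied to one slot. */
struct slot {
        unsigned long base_gfn;
        unsigned long npages;
};

static int gfn_in_slot(const struct slot *s, unsigned long gfn)
{
        return gfn >= s->base_gfn && gfn < s->base_gfn + s->npages;
}

int main(void)
{
        struct slot s = { .base_gfn = 0x1000, .npages = 0x200 };

        /* The slot covers gfns 0x1000..0x11ff: the last covered gfn hits,
         * the next one misses and __gfn_to_memslot() would return NULL. */
        printf("%d %d\n", gfn_in_slot(&s, 0x11ff), gfn_in_slot(&s, 0x1200));
        return 0;
}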