/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM dirty ring implementation
 *
 * Copyright 2019 Red Hat, Inc.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"
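
/*
 * Weak default: no extra per-vCPU dirty log entries are needed.
 * Architectures that flush a hardware dirty log into the ring on
 * vcpu exit (e.g. x86 with PML) override this with the size of that
 * hardware buffer.
 */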
int __weak kvm_cpu_dirty_log_size(void)
{
	return 0;
}
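
/*
 * Entries kept in reserve below the hard ring size.  A vCPU is asked
 * to exit once the soft limit is reached, so this headroom is what may
 * still be pushed (including an arch-flushed hardware log) before the
 * exit actually happens, without overflowing the ring.
 */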
u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}
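
/* Number of entries published to userspace but not yet reset. */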
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}
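
/*
 * True once the soft limit is reached; the vCPU should then exit to
 * userspace with KVM_EXIT_DIRTY_RING_FULL so the rings can be
 * harvested and reset.
 */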
bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}
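
/* True when every entry in the ring is in use. */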
static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->size;
}
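
/*
 * Re-enable dirty-page tracking for the pages described by one
 * coalesced (slot, offset, mask) triple, so that further writes to
 * them are logged again.  The high 16 bits of @slot carry the
 * address space id.
 */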
static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
	struct kvm_memory_slot *memslot;
	int as_id, id;

	as_id = slot >> 16;
	id = (u16)slot;

	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return;

	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
		return;

	KVM_MMU_LOCK(kvm);
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
	KVM_MMU_UNLOCK(kvm);
}
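
/*
 * Allocate the ring backing store.  @size is in bytes and must give a
 * power-of-two number of entries, since both producer and consumer
 * index the ring with "index & (ring->size - 1)".
 */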
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
{
	ring->dirty_gfns = vzalloc(size);
	if (!ring->dirty_gfns)
		return -ENOMEM;

	ring->size = size / sizeof(struct kvm_dirty_gfn);
	ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
	ring->dirty_index = 0;
	ring->reset_index = 0;
	ring->index = index;

	return 0;
}
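
/*
 * A ring entry cycles through three states:
 *
 *	invalid (flags == 0):			owned by the kernel producer
 *	dirtied (KVM_DIRTY_GFN_F_DIRTY):	published, owned by userspace
 *	harvested (KVM_DIRTY_GFN_F_RESET):	collected, ready to be reset
 *
 * Userspace harvests roughly like this (a sketch, not any particular
 * VMM's code; flags should really be read with acquire semantics):
 *
 *	while (gfn->flags == KVM_DIRTY_GFN_F_DIRTY) {
 *		collect(gfn->slot, gfn->offset);
 *		gfn->flags = KVM_DIRTY_GFN_F_RESET;
 *		gfn = next_entry(ring);
 *	}
 *	ioctl(vm_fd, KVM_RESET_DIRTY_RINGS);
 */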
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = 0;
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
	return gfn->flags & KVM_DIRTY_GFN_F_RESET;
}
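
/*
 * Walk the ring from reset_index, recycling every entry that userspace
 * has marked harvested, and have the arch re-protect the corresponding
 * pages.  Consecutive offsets within the same slot are coalesced into a
 * single (offset, mask) pair covering up to BITS_PER_LONG pages.
 * Returns the number of entries recycled.  The sole caller, the
 * KVM_RESET_DIRTY_RINGS ioctl, holds kvm->slots_lock.
 */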
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
	u32 cur_slot, next_slot;
	u64 cur_offset, next_offset;
	unsigned long mask;
	int count = 0;
	struct kvm_dirty_gfn *entry;
	bool first_round = true;

	/* This is only needed to make compilers happy */
	cur_slot = cur_offset = mask = 0;

	while (true) {
		entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

		if (!kvm_dirty_gfn_harvested(entry))
			break;

		next_slot = READ_ONCE(entry->slot);
		next_offset = READ_ONCE(entry->offset);

		/* Update the flags to reflect that this GFN is reset */
		kvm_dirty_gfn_set_invalid(entry);

		ring->reset_index++;
		count++;
		/*
		 * Try to coalesce the reset operations when the guest is
		 * scanning pages in the same slot.
		 */
		if (!first_round && next_slot == cur_slot) {
			s64 delta = next_offset - cur_offset;

			if (delta >= 0 && delta < BITS_PER_LONG) {
				mask |= 1ull << delta;
				continue;
			}

			/* Backwards visit, careful about overflows! */
			if (delta > -BITS_PER_LONG && delta < 0 &&
			    (mask << -delta >> -delta) == mask) {
				cur_offset = next_offset;
				mask = (mask << -delta) | 1;
				continue;
			}
		}
		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
		first_round = false;
	}

	kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

	trace_kvm_dirty_ring_reset(ring);

	return count;
}
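
/*
 * Publish one dirty GFN.  Space is guaranteed by the caller: a vCPU
 * exits to userspace at soft-full, well before the reserved entries
 * can be exhausted, hence the WARN below rather than error handling.
 */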
void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
{
	struct kvm_dirty_gfn *entry;

	/* It should never get full */
	WARN_ON_ONCE(kvm_dirty_ring_full(ring));

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

	entry->slot = slot;
	entry->offset = offset;
	/*
	 * Make sure the data is filled in before we publish this to
	 * the userspace program. There's no paired kernel-side reader.
	 */
	smp_wmb();
	kvm_dirty_gfn_set_dirtied(entry);
	ring->dirty_index++;
	trace_kvm_dirty_ring_push(ring, slot, offset);
}
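
/*
 * Back the mmap() of the ring: translate a page offset within the
 * vmalloc'ed array into its struct page for the vCPU fault handler.
 */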
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
	return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}
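
/* Release the ring backing store when the vCPU is destroyed. */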
void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
	vfree(ring->dirty_gfns);
	ring->dirty_gfns = NULL;
}