/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "book3s.h"

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

static ulong htab;
static u32 htabmask;

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        volatile u32 *pteg;

        /* Remove from host HTAB */
        pteg = (u32*)pte->slot;
        pteg[0] = 0;

        /* And make sure it's gone from the TLB too */
        asm volatile ("sync");
        asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
        asm volatile ("sync");
        asm volatile ("tlbsync");
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

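/*
 * Look up the shadow (host) VSID for a guest VSID.  An entry lives either in
 * the slot selected by the hash or in its mirror slot (SID_MAP_MASK - hash),
 * because create_sid_map() below alternates between the two on collisions.
 */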
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
        return NULL;
}

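/*
 * Find the host PTE group for (vsid, eaddr) using the classic 32-bit hashed
 * page table scheme: the primary hash is the VSID XORed with the page index
 * within the segment, and the secondary hash is its complement.  The shift
 * by 6 turns the hash into a byte offset (each PTEG is 64 bytes), which is
 * masked with htabmask and added to the HTAB base.
 */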
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
                                bool primary)
{
        u32 page, hash;
        ulong pteg = htab;

        page = (eaddr & ~ESID_MASK) >> 12;

        hash = ((vsid ^ page) << 6);
        if (!primary)
                hash = ~hash;

        hash &= htabmask;

        pteg |= hash;

        dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
                    htab, hash, htabmask, pteg);

        return (u32*)pteg;
}

extern char etext[];

int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        kvm_pfn_t hpaddr;
        u64 vpn;
        u64 vsid;
        struct kvmppc_sid_map *map;
        volatile u32 *pteg;
        u32 eaddr = orig_pte->eaddr;
        u32 pteg0, pteg1;
        register int rr = 0;
        bool primary = false;
        bool evict = false;
        struct hpte_cache *pte;
        int r = 0;
        bool writable;

        /* Get host physical address for gpa */
        hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
        if (is_error_noslot_pfn(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                                 orig_pte->raddr);
                r = -EINVAL;
                goto out;
        }
        hpaddr <<= PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                kvmppc_mmu_map_segment(vcpu, eaddr);
                map = find_sid_vsid(vcpu, vsid);
        }
        BUG_ON(!map);

        vsid = map->host_vsid;
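        /*
         * Build the virtual page number of this mapping in the host VSID's
         * address space; it is recorded in the shadow PTE cache entry below.
         */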
        vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
                ((eaddr & ~ESID_MASK) >> VPN_SHIFT);
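        /*
         * Search the PTE group for a free slot.  rr indexes u32 words and
         * steps by two per HPTE (each HPTE is two words, so a group holds
         * eight).  Once all eight slots of a group have been found valid,
         * switch to the other hash group and allow evicting an entry.
         */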
next_pteg:
        if (rr == 16) {
                primary = !primary;
                evict = true;
                rr = 0;
        }

        pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

        /* not evicting yet */
        if (!evict && (pteg[rr] & PTE_V)) {
                rr += 2;
                goto next_pteg;
        }

        dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

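        /*
         * Assemble the two words of the host HPTE: word 0 carries the valid
         * bit, the VSID, the secondary-hash flag and the abbreviated page
         * index; word 1 carries the real page number plus the R, C, M and
         * (below) the page protection bits.
         */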
        pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
                (primary ? 0 : PTE_SEC);
        pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

        if (orig_pte->may_write && writable) {
                pteg1 |= PP_RWRW;
                mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        } else {
                pteg1 |= PP_RWRX;
        }

        if (orig_pte->may_execute)
                kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

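        /*
         * Install the entry with interrupts off: invalidate a previously
         * valid slot first, then write the second word before the first so
         * the HPTE never becomes valid with stale contents, ordered by sync.
         */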
        local_irq_disable();

        if (pteg[rr]) {
                pteg[rr] = 0;
                asm volatile ("sync");
        }
        pteg[rr + 1] = pteg1;
        pteg[rr] = pteg0;
        asm volatile ("sync");

        local_irq_enable();

        dprintk_mmu("KVM: new PTEG: %p\n", pteg);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);


        /* Now tell our Shadow PTE code about the new page */

        pte = kvmppc_mmu_hpte_cache_next(vcpu);
        if (!pte) {
                kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
                r = -EAGAIN;
                goto out;
        }

        dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                    orig_pte->may_write ? 'w' : '-',
                    orig_pte->may_execute ? 'x' : '-',
                    orig_pte->eaddr, (ulong)pteg, vpn,
                    orig_pte->vpage, hpaddr);

        pte->slot = (ulong)&pteg[rr];
        pte->host_vpn = vpn;
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;

        kvmppc_mmu_hpte_cache_map(vcpu, pte);

        kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
        return r;
}

void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}

static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        /* We might get collisions that trap in preceding order, so let's
           map them differently */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
                vcpu_book3s->vsid_next = 0;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
        map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
        vcpu_book3s->vsid_next++;

        map->guest_vsid = gvsid;
        map->valid = true;

        return map;
}

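/*
 * Map a guest segment: translate the guest ESID to a guest VSID, find or
 * create the matching shadow VSID, and program it into the shadow segment
 * register used while the guest runs.
 */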
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        u32 esid = eaddr >> SID_SHIFT;
        u64 gvsid;
        u32 sr;
        struct kvmppc_sid_map *map;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int r = 0;

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->sr[esid] = SR_INVALID;
                r = -ENOENT;
                goto out;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;
        sr = map->host_vsid | SR_KP;
        svcpu->sr[esid] = sr;

        dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
        svcpu_put(svcpu);
        return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
        for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
                svcpu->sr[i] = SR_INVALID;

        svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        int i;

        kvmppc_mmu_hpte_destroy(vcpu);
        preempt_disable();
        for (i = 0; i < SID_CONTEXTS; i++)
                __destroy_context(to_book3s(vcpu)->context_id[i]);
        preempt_enable();
}

/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)

int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;
        ulong sdr1;
        int i;
        int j;

        for (i = 0; i < SID_CONTEXTS; i++) {
                err = __init_new_context();
                if (err < 0)
                        goto init_fail;
                vcpu3s->context_id[i] = err;

                /* Remember context id for this combination */
                for (j = 0; j < 16; j++)
                        vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
        }

        vcpu3s->vsid_next = 0;

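        /*
         * SDR1 holds the location of the hashed page table: HTABORG (the
         * 64 kB aligned physical base) in its upper 16 bits and HTABMASK in
         * its low 9 bits.  Turn these into a kernel virtual base address and
         * a byte mask that kvmppc_mmu_get_pteg() can apply to the hash.
         */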
        /* Remember where the HTAB is */
        asm ( "mfsdr1 %0" : "=r"(sdr1) );
        htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
        htab = (ulong)__va(sdr1 & 0xffff0000);

        kvmppc_mmu_hpte_init(vcpu);

        return 0;

init_fail:
        for (j = 0; j < i; j++) {
                if (!vcpu3s->context_id[j])
                        continue;

                __destroy_context(to_book3s(vcpu)->context_id[j]);
        }

        return -1;
}
|