powerpc fixes for 5.3 #2

An assortment of non-regression fixes that have accumulated since the
start of the merge window.

A fix for a user triggerable oops on machines where transactional memory
is disabled, eg. Power9 bare metal, Power8 with TM disabled on the
command line, or all Power7 or earlier machines.

Three fixes for handling of PMU and power saving registers when running
nested KVM on Power9.

Two fixes for bugs found while stress testing the XIVE interrupt
controller code, also on Power9.

A fix to allow guests to boot under Qemu/KVM on Power9 using the Hash
MMU with >= 1TB of memory.

Two fixes for bugs in the recent DMA cleanup, one of which could lead
to checkstops.

And finally three fixes for the PAPR SCM nvdimm driver.

Thanks to: Alexey Kardashevskiy, Andrea Arcangeli, Cédric Le Goater,
Christoph Hellwig, David Gibson, Gautham R. Shenoy, Michael Neuling,
Oliver O'Halloran, Satheesh Rajendran, Shawn Anastasio, Suraj Jitindar
Singh, Vaibhav Jain.
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJdOF2iAAoJEFHr6jzI4aWAVmsQAJ//UY1a+lz39y/5jmkybJbH
HVnja6ZhsKd3+ZAnljGmqr1zuwDmy8+X3pT+1832zBBm4Z1cNKj1c0wuK5fuhAfq
o0XkO0N9GFcQu8HUPb5wSBOoyXwK0qUhExfCVobl7YsDAyAI2//nSQTwxNX3W4Hv
P7hz48pBbiqRzQAOSHV8ZlcOBETbSVAXeNalSXrXqSJmXQbVWCQcd6vucMSwZ7S5
ZiiL/gCBoO0kd0ZQRsGXCbwcjcR4NlTDN0M40og8Y9KTDkId8HdmJyXW3tMcZo/g
W3LeMR94bUh/KrK88lMBrRXKUlxL+loZKWZaeNlA5+ShCYk/ZafkKri/QUX/glOq
ahm8uqokdZ5VS1tgSYoJIKdA5qMGvv8V+CpHRJnZqaEhUCduQa5XmWPnDnEKkDt0
94VBsk0D2vHYKyygv5JMgYHQVlU7XrQF8fw2pKShpqLMY7ZMpeDDmKN9AuzxhawF
9b7HigbwNt5LvNJ0xn097KW+svCK7i3ZgiQe83W36wjSl2ystgjJ3T7yrH6Q1rKH
o4loEGA4gASTDjTmWQM20lHT1xQHY4fQBC/wi/67as3m0TDeGXYI0fOZC5qtEBFr
Ln/0e78VMhut/RWicDlRveszef1MCi1warR9R4I/bQ8M6O1BzHYsQ9zr6H111uxL
vQ92Yp8G2PoqN7wlFlSG
=Yc9T
-----END PGP SIGNATURE-----

Merge tag 'powerpc-5.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "An assortment of non-regression fixes that have accumulated since the
  start of the merge window.

  - A fix for a user triggerable oops on machines where transactional
    memory is disabled, eg. Power9 bare metal, Power8 with TM disabled
    on the command line, or all Power7 or earlier machines.

  - Three fixes for handling of PMU and power saving registers when
    running nested KVM on Power9.

  - Two fixes for bugs found while stress testing the XIVE interrupt
    controller code, also on Power9.

  - A fix to allow guests to boot under Qemu/KVM on Power9 using the
    Hash MMU with >= 1TB of memory.

  - Two fixes for bugs in the recent DMA cleanup, one of which could
    lead to checkstops.

  - And finally three fixes for the PAPR SCM nvdimm driver.

  Thanks to: Alexey Kardashevskiy, Andrea Arcangeli, Cédric Le Goater,
  Christoph Hellwig, David Gibson, Gautham R. Shenoy, Michael Neuling,
  Oliver O'Halloran, Satheesh Rajendran, Shawn Anastasio, Suraj
  Jitindar Singh, Vaibhav Jain"

* tag 'powerpc-5.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/papr_scm: Force a scm-unbind if initial scm-bind fails
  powerpc/papr_scm: Update drc_pmem_unbind() to use H_SCM_UNBIND_ALL
  powerpc/pseries: Update SCM hcall op-codes in hvcall.h
  powerpc/tm: Fix oops on sigreturn on systems without TM
  powerpc/dma: Fix invalid DMA mmap behavior
  KVM: PPC: Book3S HV: XIVE: fix rollback when kvmppc_xive_create fails
  powerpc/xive: Fix loop exit-condition in xive_find_target_in_mask()
  powerpc: fix off by one in max_zone_pfn initialization for ZONE_DMA
  KVM: PPC: Book3S HV: Save and restore guest visible PSSCR bits on pseries
  powerpc/pmu: Set pmcregs_in_use in paca when running as LPAR
  KVM: PPC: Book3S HV: Always save guest pmu for guest capable of nesting
  powerpc/mm: Limit rma_size to 1TB when running without HV mode
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -121,6 +121,7 @@ config PPC
 	select ARCH_32BIT_OFF_T if PPC32
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
+	select ARCH_HAS_DMA_MMAP_PGPROT
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -302,9 +302,14 @@
 #define H_SCM_UNBIND_MEM	0x3F0
 #define H_SCM_QUERY_BLOCK_MEM_BINDING 0x3F4
 #define H_SCM_QUERY_LOGICAL_MEM_BINDING 0x3F8
-#define H_SCM_MEM_QUERY	0x3FC
-#define H_SCM_BLOCK_CLEAR	0x400
-#define MAX_HCALL_OPCODE	H_SCM_BLOCK_CLEAR
+#define H_SCM_UNBIND_ALL	0x3FC
+#define H_SCM_HEALTH	0x400
+#define H_SCM_PERFORMANCE_STATS 0x418
+#define MAX_HCALL_OPCODE	H_SCM_PERFORMANCE_STATS
+
+/* Scope args for H_SCM_UNBIND_ALL */
+#define H_UNBIND_SCOPE_ALL (0x1)
+#define H_UNBIND_SCOPE_DRC (0x2)
 
 /* H_VIOCTL functions */
 #define H_GET_VIOA_DUMP_SIZE	0x01
--- a/arch/powerpc/include/asm/pmc.h
+++ b/arch/powerpc/include/asm/pmc.h
@@ -26,12 +26,11 @@ static inline void ppc_set_pmu_inuse(int inuse)
 	if (firmware_has_feature(FW_FEATURE_LPAR)) {
 #ifdef CONFIG_PPC_PSERIES
 		get_lppaca()->pmcregs_in_use = inuse;
 #endif
-	} else {
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-		get_paca()->pmcregs_in_use = inuse;
-#endif
 	}
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	get_paca()->pmcregs_in_use = inuse;
+#endif
 #endif
 }
 
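The subtlety behind this hunk: a pseries LPAR that can itself host nested
guests needs both flags set, since the lppaca bit tells the hypervisor above
to save/restore the PMU while the paca bit is what the LPAR's own guest-entry
code consults. A toy model of the before/after behaviour (plain userspace C;
the two booleans are hypothetical stand-ins for the lppaca/paca fields):

#include <stdio.h>
#include <stdbool.h>

static bool lppaca_flag;	/* stands in for get_lppaca()->pmcregs_in_use */
static bool paca_flag;		/* stands in for get_paca()->pmcregs_in_use */

/* Old logic: either/or, so an LPAR never set its own paca flag */
static void set_pmu_inuse_old(bool is_lpar, bool inuse)
{
	if (is_lpar)
		lppaca_flag = inuse;
	else
		paca_flag = inuse;
}

/* Fixed logic: the paca flag is set unconditionally when KVM HV is possible */
static void set_pmu_inuse_new(bool is_lpar, bool inuse)
{
	if (is_lpar)
		lppaca_flag = inuse;
	paca_flag = inuse;
}

int main(void)
{
	set_pmu_inuse_old(true, true);
	printf("old: lppaca=%d paca=%d\n", lppaca_flag, paca_flag); /* 1 0 */

	lppaca_flag = paca_flag = false;
	set_pmu_inuse_new(true, true);
	printf("new: lppaca=%d paca=%d\n", lppaca_flag, paca_flag); /* 1 1 */
	return 0;
}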
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -49,7 +49,8 @@ obj-y := cputable.o ptrace.o syscalls.o \
 				   signal.o sysfs.o cacheinfo.o time.o \
 				   prom.o traps.o setup-common.o \
 				   udbg.o misc.o io.o misc_$(BITS).o \
-				   of_platform.o prom_parse.o
+				   of_platform.o prom_parse.o \
+				   dma-common.o
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o nvram_64.o firmware.o
--- /dev/null
+++ b/arch/powerpc/kernel/dma-common.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Contains common dma routines for all powerpc platforms.
+ *
+ * Copyright (C) 2019 Shawn Anastasio.
+ */
+
+#include <linux/mm.h>
+#include <linux/dma-noncoherent.h>
+
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+		unsigned long attrs)
+{
+	if (!dev_is_dma_coherent(dev))
+		return pgprot_noncached(prot);
+	return prot;
+}
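Why this hook matters: when userspace mmap()s a buffer allocated for a device
that is not cache coherent, the CPU-side mapping must be non-cacheable too, or
userspace can read stale cache lines the device never sees. A minimal
stand-alone sketch of the decision the new file encodes (stand-in pgprot type
and flag values; the real ones come from the kernel headers):

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long pgprot_t;		/* stand-in for the kernel type */
#define PROT_CACHED	0x1UL		/* hypothetical flag values */
#define PROT_NONCACHED	0x2UL

static pgprot_t pgprot_noncached(pgprot_t prot)
{
	return (prot & ~PROT_CACHED) | PROT_NONCACHED;
}

/* Same decision as arch_dma_mmap_pgprot() above: only coherent devices
 * may keep a cacheable userspace mapping of a DMA buffer. */
static pgprot_t dma_mmap_pgprot(bool dev_coherent, pgprot_t prot)
{
	return dev_coherent ? prot : pgprot_noncached(prot);
}

int main(void)
{
	printf("coherent:     %#lx\n", dma_mmap_pgprot(true, PROT_CACHED));
	printf("non-coherent: %#lx\n", dma_mmap_pgprot(false, PROT_CACHED));
	return 0;
}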
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1198,6 +1198,9 @@ SYSCALL_DEFINE0(rt_sigreturn)
 			goto bad;
 
 		if (MSR_TM_ACTIVE(msr_hi<<32)) {
+			/* Trying to start TM on non TM system */
+			if (!cpu_has_feature(CPU_FTR_TM))
+				goto bad;
 			/* We only recheckpoint on return if we're
 			 * transaction.
 			 */
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -771,6 +771,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	if (MSR_TM_ACTIVE(msr)) {
 		/* We recheckpoint on return. */
 		struct ucontext __user *uc_transact;
+
+		/* Trying to start TM on non TM system */
+		if (!cpu_has_feature(CPU_FTR_TM))
+			goto badframe;
+
 		if (__get_user(uc_transact, &uc->uc_link))
 			goto badframe;
 		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
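Both hunks add the same guard: a signal frame whose saved MSR claims an
active transaction must be rejected before the kernel tries to recheckpoint
on a CPU without TM, which is what oopsed previously. A stand-alone sketch of
the MSR check involved (the TS bit positions mirror the kernel's MSR_TS_*
masks; the values are assumed here for illustration):

#include <stdio.h>

/* TS (transaction state) field of the 64-bit MSR */
#define MSR_TS_S	(1ULL << 33)	/* transaction suspended */
#define MSR_TS_T	(1ULL << 34)	/* transaction active */
#define MSR_TS_MASK	(MSR_TS_S | MSR_TS_T)
#define MSR_TM_ACTIVE(x)	(((x) & MSR_TS_MASK) != 0)

int main(void)
{
	unsigned long long forged_msr = MSR_TS_S; /* what a bad sigframe claims */
	int cpu_has_tm = 0;                       /* e.g. Power9 bare metal */

	if (MSR_TM_ACTIVE(forged_msr) && !cpu_has_tm)
		printf("sigreturn rejected (old kernels oopsed here)\n");
	return 0;
}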
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3569,9 +3569,18 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
 
 	if (kvmhv_on_pseries()) {
+		/*
+		 * We need to save and restore the guest visible part of the
+		 * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
+		 * doesn't do this for us. Note only required if pseries since
+		 * this is done in kvmhv_load_hv_regs_and_go() below otherwise.
+		 */
+		unsigned long host_psscr;
 		/* call our hypervisor to load up HV regs and go */
 		struct hv_guest_state hvregs;
 
+		host_psscr = mfspr(SPRN_PSSCR_PR);
+		mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
 		kvmhv_save_hv_regs(vcpu, &hvregs);
 		hvregs.lpcr = lpcr;
 		vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
@@ -3590,6 +3599,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 		vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
 		vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
 		vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+		vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
+		mtspr(SPRN_PSSCR_PR, host_psscr);
 
 		/* H_CEDE has to be handled now, not later */
 		if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
@@ -3654,6 +3665,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 		vcpu->arch.vpa.dirty = 1;
 		save_pmu = lp->pmcregs_in_use;
 	}
+	/* Must save pmu if this guest is capable of running nested guests */
+	save_pmu |= nesting_enabled(vcpu->kvm);
 
 	kvmhv_save_guest_pmu(vcpu, save_pmu);
 
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1986,10 +1986,8 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
 
 	xive->single_escalation = xive_native_has_single_escalation();
 
-	if (ret) {
-		kfree(xive);
+	if (ret)
 		return ret;
-	}
 
 	return 0;
 }
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -1090,9 +1090,9 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
 	xive->ops = &kvmppc_xive_native_ops;
 
 	if (ret)
-		kfree(xive);
+		return ret;
 
-	return ret;
+	return 0;
 }
 
 /*
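Both XIVE create fixes enforce the same rule: once the freshly allocated
structure has been published (it is already reachable from the VM by the time
create can fail), the error path must not kfree() it as well, or the later
teardown frees it a second time. A distilled illustration of the hazard and
the fixed shape (hypothetical names, not the KVM code):

#include <stdlib.h>

struct dev_state { int id; };

static struct dev_state *published;	/* plays the role of kvm->arch.xive */

static int create(int fail)
{
	struct dev_state *s = malloc(sizeof(*s));

	if (!s)
		return -1;
	published = s;		/* the pointer escapes here... */

	if (fail) {
		/* ...so freeing it now would leave 'published' dangling and
		 * the owner's teardown would free it again. Correct rollback:
		 * just report the error; teardown happens exactly once. */
		return -1;
	}
	return 0;
}

static void release(void)
{
	free(published);	/* single point of teardown */
	published = NULL;
}

int main(void)
{
	if (create(1))
		release();
	return 0;
}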
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1899,11 +1899,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 	 *
 	 * For guests on platforms before POWER9, we clamp the it limit to 1G
 	 * to avoid some funky things such as RTAS bugs etc...
+	 *
+	 * On POWER9 we limit to 1TB in case the host erroneously told us that
+	 * the RMA was >1TB. Effective address bits 0:23 are treated as zero
+	 * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
+	 * for virtual real mode addressing and so it doesn't make sense to
+	 * have an area larger than 1TB as it can't be addressed.
 	 */
 	if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
 		ppc64_rma_size = first_memblock_size;
 		if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
 			ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
+		else
+			ppc64_rma_size = min_t(u64, ppc64_rma_size,
+					       1UL << SID_SHIFT_1T);
 
 		/* Finally limit subsequent allocations */
 		memblock_set_current_limit(ppc64_rma_size);
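The 1TB cap follows from the comment's arithmetic: if effective-address bits
0:23 (IBM numbering, i.e. the 24 most significant bits of a 64-bit address)
are treated as zero in virtual real mode, only 40 low bits remain, and 2^40
bytes is 1TB, which is what 1UL << SID_SHIFT_1T expresses (assuming
SID_SHIFT_1T == 40, the 1TB segment shift):

#include <stdio.h>

int main(void)
{
	/* 64-bit EA with the top 24 bits (IBM bits 0:23) treated as zero */
	unsigned long usable_bits = 64 - 24;		/* 40 */
	unsigned long rma_limit = 1UL << usable_bits;	/* 1UL << SID_SHIFT_1T */

	printf("usable bits: %lu\n", usable_bits);
	printf("max VRMA:    %lu bytes = %lu GB\n",
	       rma_limit, rma_limit >> 30);		/* 1024 GB = 1 TB */
	return 0;
}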
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -239,7 +239,7 @@ void __init paging_init(void)
 
 #ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
-				      ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
+				      1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
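The off-by-one is clearest with numbers plugged in: subtracting 1 before the
shift truncates away the zone's last page frame, while shifting the exponent
directly gives the exact page count. Illustrative values below (a 2GB DMA
zone, ARCH_ZONE_DMA_BITS == 31, and 4K pages, PAGE_SHIFT == 12; both are
assumptions for the example):

#include <stdio.h>

int main(void)
{
	const unsigned long dma_bits = 31, page_shift = 12;

	/* Old formula: the -1 applied before the shift drops one pfn */
	unsigned long old = ((1UL << dma_bits) - 1) >> page_shift; /* 524287 */
	/* Fixed formula: exact pfn count of the DMA-addressable range */
	unsigned long new = 1UL << (dma_bits - page_shift);        /* 524288 */

	printf("old=%lu new=%lu diff=%lu\n", old, new, new - old); /* diff=1 */
	return 0;
}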
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -11,6 +11,7 @@
 #include <linux/sched.h>
 #include <linux/libnvdimm.h>
 #include <linux/platform_device.h>
+#include <linux/delay.h>
 
 #include <asm/plpar_wrappers.h>
 
@@ -43,8 +44,9 @@ struct papr_scm_priv {
 static int drc_pmem_bind(struct papr_scm_priv *p)
 {
 	unsigned long ret[PLPAR_HCALL_BUFSIZE];
-	uint64_t rc, token;
 	uint64_t saved = 0;
+	uint64_t token;
+	int64_t rc;
 
 	/*
 	 * When the hypervisor cannot map all the requested memory in a single
@@ -64,6 +66,10 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
 	} while (rc == H_BUSY);
 
 	if (rc) {
+		/* H_OVERLAP needs a separate error path */
+		if (rc == H_OVERLAP)
+			return -EBUSY;
+
 		dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
 		return -ENXIO;
 	}
@@ -78,22 +84,36 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
 static int drc_pmem_unbind(struct papr_scm_priv *p)
 {
 	unsigned long ret[PLPAR_HCALL_BUFSIZE];
-	uint64_t rc, token;
+	uint64_t token = 0;
+	int64_t rc;
 
-	token = 0;
+	dev_dbg(&p->pdev->dev, "unbind drc %x\n", p->drc_index);
 
-	/* NB: unbind has the same retry requirements mentioned above */
+	/* NB: unbind has the same retry requirements as drc_pmem_bind() */
 	do {
-		rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
-				p->bound_addr, p->blocks, token);
+
+		/* Unbind of all SCM resources associated with drcIndex */
+		rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC,
+				 p->drc_index, token);
 		token = ret[0];
-		cond_resched();
+
+		/* Check if we are stalled for some time */
+		if (H_IS_LONG_BUSY(rc)) {
+			msleep(get_longbusy_msecs(rc));
+			rc = H_BUSY;
+		} else if (rc == H_BUSY) {
+			cond_resched();
+		}
+
 	} while (rc == H_BUSY);
 
 	if (rc)
 		dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
+	else
+		dev_dbg(&p->pdev->dev, "unbind drc %x complete\n",
+			p->drc_index);
 
-	return !!rc;
+	return rc == H_SUCCESS ? 0 : -ENXIO;
 }
 
 static int papr_scm_meta_get(struct papr_scm_priv *p,
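The loop above is the standard PAPR busy-retry idiom: H_BUSY means retry
after yielding, the H_LONG_BUSY_* codes carry a hint of how long to sleep
first, and the continuation token returned in ret[0] lets the hypervisor
resume where it left off. A stubbed userspace rendering of the same control
flow (hypothetical return-code values; the real ones live in hvcall.h):

#include <stdio.h>

#define H_SUCCESS	0
#define H_BUSY		1
#define H_LONG_BUSY	2	/* stand-in for the H_LONG_BUSY_* range */

/* Fake hcall: needs three calls to finish, advancing a continuation token */
static long fake_hcall(unsigned long *token)
{
	if (*token < 2) {
		(*token)++;
		return *token == 1 ? H_LONG_BUSY : H_BUSY;
	}
	return H_SUCCESS;
}

int main(void)
{
	unsigned long token = 0;
	long rc;

	do {
		rc = fake_hcall(&token);

		if (rc == H_LONG_BUSY) {
			printf("long busy: would msleep() here\n");
			rc = H_BUSY;	/* fold into the plain-busy retry */
		} else if (rc == H_BUSY) {
			printf("busy: would cond_resched() here\n");
		}
	} while (rc == H_BUSY);

	printf("done, rc=%ld\n", rc);
	return rc == H_SUCCESS ? 0 : 1;
}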
@@ -389,6 +409,14 @@ static int papr_scm_probe(struct platform_device *pdev)
 
 	/* request the hypervisor to bind this region to somewhere in memory */
 	rc = drc_pmem_bind(p);
+
+	/* If phyp says drc memory still bound then force unbound and retry */
+	if (rc == -EBUSY) {
+		dev_warn(&pdev->dev, "Retrying bind after unbinding\n");
+		drc_pmem_unbind(p);
+		rc = drc_pmem_bind(p);
+	}
+
 	if (rc)
 		goto err;
 
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -479,7 +479,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
 	 * Now go through the entire mask until we find a valid
 	 * target.
 	 */
-	for (;;) {
+	do {
 		/*
 		 * We re-check online as the fallback case passes us
 		 * an untested affinity mask
@@ -487,12 +487,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
 		if (cpu_online(cpu) && xive_try_pick_target(cpu))
 			return cpu;
 		cpu = cpumask_next(cpu, mask);
-		if (cpu == first)
-			break;
 		/* Wrap around */
 		if (cpu >= nr_cpu_ids)
 			cpu = cpumask_first(mask);
-	}
+	} while (cpu != first);
+
 	return -1;
 }
 
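The bug the do/while fixes: the old loop compared cpu against first before
the wrap-around assignment, and cpumask_next() only ever returns bits
strictly after its argument, so when first was the mask's first CPU the break
never fired and the search spun forever once every target was busy. A small
simulation of the fixed control flow (toy cpumask helpers, not the kernel's):

#include <stdio.h>

/* Toy 4-entry cpumask: CPUs 0..2 present, nr_cpu_ids == 4 */
static const int mask[] = { 1, 1, 1, 0 };
#define NR_CPU_IDS 4

static int mask_first(void)
{
	for (int c = 0; c < NR_CPU_IDS; c++)
		if (mask[c])
			return c;
	return NR_CPU_IDS;
}

static int mask_next(int c)
{
	for (c++; c < NR_CPU_IDS; c++)
		if (mask[c])
			return c;
	return NR_CPU_IDS;	/* past the end, like cpumask_next() */
}

int main(void)
{
	/* Worst case: every xive_try_pick_target() attempt fails */
	int first = mask_first();
	int cpu = first;
	int steps = 0;

	do {
		steps++;		/* a try-pick attempt would go here */
		cpu = mask_next(cpu);
		if (cpu >= NR_CPU_IDS)	/* wrap around */
			cpu = mask_first();
	} while (cpu != first);		/* checked AFTER the wrap: terminates */

	printf("gave up after %d probes\n", steps);	/* 3, one per CPU */
	return 0;
}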