- Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8" from Paul
  - Handle irq_happened flag correctly in off-line loop from Paul
  - Validate rtas.entry before calling enter_rtas() from Vasant
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJWKakXAAoJEFHr6jzI4aWAqwsP/22a8aq7SPuPaCK/c7u84aHg
 UkzYNdrC+osDvnrydqyJsmIojVLX+AsJ7TCbBZFaJ6oOuew9bVzmZ5mfNvOVn86H
 dCH2GMr4NbWbkO3SaFi6ZTUVGl7JyjEhf3uCtKGssa2+Do8FubK6Y88L1rhzFFdz
 l1Dx3Jp8CpGKcByQfwYyaNZhC/GEZ06pY36d362mLnyctxcQRYr5l+8boDH81nyE
 f89RE7baNPYOL0YOhZAh3ZilBrZ8DIAaesMXU8LUKFbLTBgWfVPkDy3l5a2m47oP
 V/Yi+oEQBkL/3Itth57iGWpl8vVkzF2MTu8Aep3BzHJXqXCliTzVVdXETW6NCdut
 Nss0xtnNdM18+0mhG3LzzdoZGi/Zb0SYz8j+nY5vE2nf7FDVFkAZzKHeW822zNaV
 A1PLJa+ei4jVhKtTp4wETjpUi+kw+ikM+rR1L1/+IKHbriLsRrj7Zw3xo6Em1KVq
 cI2g7DZLSzptIprxbEv9rNhb1VlBot4jc4mmGhmyMlwKDkpCxRkYVv+Ilfi6jCSc
 6llKTZfKqEV+0sXO6QISv8wfiye84jVTKOlkpQLvpugz9rBTpq8apmInVh4AHF2b
 wDRgs/iyOSZuz+UiPEHHXbW7ZfF2F7lqxxtQgJefiWLDsvBbsfnVTyDJsKibvWzb
 lxorlKx/tH/q4pNBjmoB
 =K7eA
 -----END PGP SIGNATURE-----

Merge tag 'powerpc-4.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on
   POWER8" from Paul
 - Handle irq_happened flag correctly in off-line loop from Paul
 - Validate rtas.entry before calling enter_rtas() from Vasant

* tag 'powerpc-4.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/rtas: Validate rtas.entry before calling enter_rtas()
  powerpc/powernv: Handle irq_happened flag correctly in off-line loop
  powerpc: Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8"
Linus Torvalds, 2015-10-23 18:49:51 +09:00
Parents: d0ddf980d6 8832317f66
Commit: a2c01ed5d4
7 changed files, 28 additions and 86 deletions

arch/powerpc/include/asm/cache.h

@@ -3,7 +3,6 @@
 #ifdef __KERNEL__
-#include <asm/reg.h>
 /* bytes per L1 cache line */
 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -40,12 +39,6 @@ struct ppc64_caches {
 };
 extern struct ppc64_caches ppc64_caches;
-static inline void logmpp(u64 x)
-{
-        asm volatile(PPC_LOGMPP(R1) : : "r" (x));
-}
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 #if defined(__ASSEMBLY__)

arch/powerpc/include/asm/kvm_host.h

@@ -297,8 +297,6 @@ struct kvmppc_vcore {
         u32 arch_compat;
         ulong pcr;
         ulong dpdes;            /* doorbell state (POWER8) */
-        void *mpp_buffer;       /* Micro Partition Prefetch buffer */
-        bool mpp_buffer_is_valid;
         ulong conferring_threads;
 };

arch/powerpc/include/asm/ppc-opcode.h

@@ -141,7 +141,6 @@
 #define PPC_INST_ISEL 0x7c00001e
 #define PPC_INST_ISEL_MASK 0xfc00003e
 #define PPC_INST_LDARX 0x7c0000a8
-#define PPC_INST_LOGMPP 0x7c0007e4
 #define PPC_INST_LSWI 0x7c0004aa
 #define PPC_INST_LSWX 0x7c00042a
 #define PPC_INST_LWARX 0x7c000028
@@ -285,20 +284,6 @@
 #define __PPC_EH(eh) 0
 #endif
-/* POWER8 Micro Partition Prefetch (MPP) parameters */
-/* Address mask is common for LOGMPP instruction and MPPR SPR */
-#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL
-/* Bits 60 and 61 of MPP SPR should be set to one of the following */
-/* Aborting the fetch is indeed setting 00 in the table size bits */
-#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
-#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
-/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
-#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
-#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
-#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
 /* Deal with instructions that older assemblers aren't aware of */
 #define PPC_DCBAL(a, b)  stringify_in_c(.long PPC_INST_DCBAL | \
                                 __PPC_RA(a) | __PPC_RB(b))
@@ -307,8 +292,6 @@
 #define PPC_LDARX(t, a, b, eh)  stringify_in_c(.long PPC_INST_LDARX | \
                                 ___PPC_RT(t) | ___PPC_RA(a) | \
                                 ___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_LOGMPP(b)           stringify_in_c(.long PPC_INST_LOGMPP | \
-                                __PPC_RB(b))
 #define PPC_LWARX(t, a, b, eh)  stringify_in_c(.long PPC_INST_LWARX | \
                                 ___PPC_RT(t) | ___PPC_RA(a) | \
                                 ___PPC_RB(b) | __PPC_EH(eh))

arch/powerpc/include/asm/reg.h

@@ -226,7 +226,6 @@
 #define   CTRL_TE 0x00c00000 /* thread enable */
 #define   CTRL_RUNLATCH 0x1
 #define SPRN_DAWR 0xB4
-#define SPRN_MPPR 0xB8 /* Micro Partition Prefetch Register */
 #define SPRN_RPR 0xBA /* Relative Priority Register */
 #define SPRN_CIABR 0xBB
 #define   CIABR_PRIV 0x3

arch/powerpc/kernel/rtas.c

@@ -1043,6 +1043,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
         if (!capable(CAP_SYS_ADMIN))
                 return -EPERM;
+        if (!rtas.entry)
+                return -EINVAL;
+
         if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
                 return -EFAULT;
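
The new check guards the rtas syscall on platforms that never set up RTAS, where rtas.entry stays 0, so the call now fails cleanly with -EINVAL instead of letting enter_rtas() branch to a bogus entry point. A minimal sketch of how the start of ppc_rtas() reads with this hunk applied; only the signature and the three checks come from the hunk above, the local declaration is inferred from the copy_from_user() call and the trailing placeholder stands in for the elided body:

asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
{
        struct rtas_args args;  /* kernel copy of the user-supplied arguments */

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* No RTAS on this platform: refuse the call early. */
        if (!rtas.entry)
                return -EINVAL;

        if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
                return -EFAULT;

        /* ... argument validation and the enter_rtas() call follow in the
         * real function; elided in this sketch ... */
        return 0;
}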

arch/powerpc/kvm/book3s_hv.c

@@ -36,7 +36,6 @@
 #include <asm/reg.h>
 #include <asm/cputable.h>
-#include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -75,12 +74,6 @@
 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
-#if defined(CONFIG_PPC_64K_PAGES)
-#define MPP_BUFFER_ORDER 0
-#elif defined(CONFIG_PPC_4K_PAGES)
-#define MPP_BUFFER_ORDER 3
-#endif
 static int dynamic_mt_modes = 6;
 module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
@@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
         vcore->kvm = kvm;
         INIT_LIST_HEAD(&vcore->preempt_list);
-        vcore->mpp_buffer_is_valid = false;
-        if (cpu_has_feature(CPU_FTR_ARCH_207S))
-                vcore->mpp_buffer = (void *)__get_free_pages(
-                                GFP_KERNEL|__GFP_ZERO,
-                                MPP_BUFFER_ORDER);
         return vcore;
 }
@@ -1894,33 +1880,6 @@ static int on_primary_thread(void)
         return 1;
 }
-static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
-{
-        phys_addr_t phy_addr, mpp_addr;
-        phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
-        mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-        mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
-        logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
-        vc->mpp_buffer_is_valid = true;
-}
-static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
-{
-        phys_addr_t phy_addr, mpp_addr;
-        phy_addr = virt_to_phys(vc->mpp_buffer);
-        mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-        /* We must abort any in-progress save operations to ensure
-         * the table is valid so that prefetch engine knows when to
-         * stop prefetching. */
-        logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
-        mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
-}
 /*
  * A list of virtual cores for each physical CPU.
  * These are vcores that could run but their runner VCPU tasks are
@@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
         srcu_idx = srcu_read_lock(&vc->kvm->srcu);
-        if (vc->mpp_buffer_is_valid)
-                kvmppc_start_restoring_l2_cache(vc);
         __kvmppc_vcore_entry();
-        if (vc->mpp_buffer)
-                kvmppc_start_saving_l2_cache(vc);
         srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
         spin_lock(&vc->lock);
@@ -3073,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 {
         long int i;
-        for (i = 0; i < KVM_MAX_VCORES; ++i) {
-                if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
-                        struct kvmppc_vcore *vc = kvm->arch.vcores[i];
-                        free_pages((unsigned long)vc->mpp_buffer,
-                                   MPP_BUFFER_ORDER);
-                }
+        for (i = 0; i < KVM_MAX_VCORES; ++i)
                 kfree(kvm->arch.vcores[i]);
-        }
         kvm->arch.online_vcores = 0;
 }
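
For readability, here is kvmppc_free_vcores() as it reads once the last hunk is applied, reconstructed from the context and added lines above (a sketch of the result, not copied from the tree):

static void kvmppc_free_vcores(struct kvm *kvm)
{
        long int i;

        /* With the per-vcore MPP buffer gone, freeing a vcore is just kfree(). */
        for (i = 0; i < KVM_MAX_VCORES; ++i)
                kfree(kvm->arch.vcores[i]);
        kvm->arch.online_vcores = 0;
}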

arch/powerpc/platforms/powernv/smp.c

@@ -171,7 +171,26 @@ static void pnv_smp_cpu_kill_self(void)
          * so clear LPCR:PECE1. We keep PECE2 enabled.
          */
         mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
+        /*
+         * Hard-disable interrupts, and then clear irq_happened flags
+         * that we can safely ignore while off-line, since they
+         * are for things for which we do no processing when off-line
+         * (or in the case of HMI, all the processing we need to do
+         * is done in lower-level real-mode code).
+         */
+        hard_irq_disable();
+        local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
         while (!generic_check_cpu_restart(cpu)) {
+                /*
+                 * Clear IPI flag, since we don't handle IPIs while
+                 * offline, except for those when changing micro-threading
+                 * mode, which are handled explicitly below, and those
+                 * for coming online, which are handled via
+                 * generic_check_cpu_restart() calls.
+                 */
+                kvmppc_set_host_ipi(cpu, 0);
                 ppc64_runlatch_off();
@@ -196,20 +215,20 @@ static void pnv_smp_cpu_kill_self(void)
                  * having finished executing in a KVM guest, then srr1
                  * contains 0.
                  */
-                if ((srr1 & wmask) == SRR1_WAKEEE) {
+                if (((srr1 & wmask) == SRR1_WAKEEE) ||
+                    (local_paca->irq_happened & PACA_IRQ_EE)) {
                         icp_native_flush_interrupt();
-                        local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
-                        smp_mb();
                 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
                         unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                         asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
-                        kvmppc_set_host_ipi(cpu, 0);
                 }
+                local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
+                smp_mb();
                 if (cpu_core_split_required())
                         continue;
-                if (!generic_check_cpu_restart(cpu))
+                if (srr1 && !generic_check_cpu_restart(cpu))
                         DBG("CPU%d Unexpected exit while offline !\n", cpu);
         }
         mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
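
Read together, the two hunks move the interrupt bookkeeping out of the individual wakeup branches: interrupts are hard-disabled once before the loop, the flags that are deliberately ignored while off-line (decrementer, HMI) are cleared up front, the host-IPI flag is cleared on every pass, and PACA_IRQ_EE/PACA_IRQ_DBELL are cleared after every wakeup rather than only on some paths. A condensed sketch of the resulting loop, assuming the usual setup of cpu, srr1 and wmask done elsewhere in this function and with the idle-state entry elided:

hard_irq_disable();
local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);

while (!generic_check_cpu_restart(cpu)) {
        kvmppc_set_host_ipi(cpu, 0);    /* IPIs are not serviced while off-line */

        /* ... enter the platform idle state; srr1 describes the wakeup ... */

        if (((srr1 & wmask) == SRR1_WAKEEE) ||
            (local_paca->irq_happened & PACA_IRQ_EE)) {
                icp_native_flush_interrupt();
        } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
                unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
        }
        /* Both wakeup sources are acknowledged above, so the flags can be
         * cleared unconditionally before checking for a split or restart. */
        local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
        smp_mb();

        if (cpu_core_split_required())
                continue;
        if (srr1 && !generic_check_cpu_restart(cpu))
                DBG("CPU%d Unexpected exit while offline !\n", cpu);
}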