powerpc fixes for 5.9 #4
Merge tag 'powerpc-5.9-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Revert our removal of PROT_SAO, at least one user expressed an
   interest in using it on Power9. Instead don't allow it to be used in
   guests unless enabled explicitly at compile time. (See the usage
   sketch after the shortlog below.)

 - A fix for a crash introduced by a recent change to FP handling.

 - Revert a change to our idle code that left Power10 with no idle
   support.

 - One minor fix for the new scv system call path to set PPR.

 - Fix a crash in our "generic" PMU if branch stack events were
   enabled.

 - A fix for the IMC PMU, to correctly identify host kernel samples.

 - The ADB_PMU powermac code was found to be incompatible with
   VMAP_STACK, so make them incompatible in Kconfig until the code can
   be fixed.

 - A build fix in drivers/video/fbdev/controlfb.c, and a documentation
   fix.

Thanks to Alexey Kardashevskiy, Athira Rajeev, Christophe Leroy,
Giuseppe Sacco, Madhavan Srinivasan, Milton Miller, Nicholas Piggin,
Pratik Rajesh Sampat, Randy Dunlap, Shawn Anastasio, and Vaidyanathan
Srinivasan.
* tag 'powerpc-5.9-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/32s: Disable VMAP stack which CONFIG_ADB_PMU
  Revert "powerpc/powernv/idle: Replace CPU feature check with PVR check"
  powerpc/perf: Fix reading of MSR[HV/PR] bits in trace-imc
  powerpc/perf: Fix crashes with generic_compat_pmu & BHRB
  powerpc/64s: Fix crash in load_fp_state() due to fpexc_mode
  powerpc/64s: scv entry should set PPR
  Documentation/powerpc: fix malformed table in syscall64-abi
  video: fbdev: controlfb: Fix build for COMPILE_TEST=y && PPC_PMAC=n
  selftests/powerpc: Update PROT_SAO test to skip ISA 3.1
  powerpc/64s: Disallow PROT_SAO in LPARs by default
  Revert "powerpc/64s: Remove PROT_SAO support"
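As a quick illustration of the PROT_SAO item above, the sketch below shows how a
userspace program might request a Strong Access Ordering mapping, in the same spirit
as the prot_sao selftest added by this series. It is a minimal sketch and not part of
the merge: the local PROT_SAO fallback define and the non-fatal error handling are
assumptions for systems whose headers do not expose PROT_SAO or whose CPUs/LPARs do
not allow it (for example ISA 3.1 / Power10, or an LPAR without CONFIG_PPC_PROT_SAO_LPAR).

/*
 * Minimal userspace sketch (not part of this merge): ask for a PROT_SAO
 * mapping and fall back to an ordinary mapping if the kernel refuses it.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef PROT_SAO
#define PROT_SAO 0x10   /* powerpc value from <asm/mman.h>; assumed absent from older headers */
#endif

int main(void)
{
        size_t size = 64 * 1024;
        char *p;

        /* May be rejected on CPUs without SAO or in unsupported LPARs. */
        p = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_SAO,
                 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap(PROT_SAO)");
                /* Fall back to a normal mapping without SAO semantics. */
                p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
                if (p == MAP_FAILED)
                        return 1;
        }

        memset(p, 0xaa, size);  /* touch the pages so they are actually faulted in */
        munmap(p, size);
        return 0;
}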
commit 8bb5021cc2
@@ -49,16 +49,18 @@ Register preservation rules
Register preservation rules match the ELF ABI calling sequence with the
following differences:

=========== ============= ========================================
--- For the sc instruction, differences with the ELF ABI ---
=========== ============= ========================================
r0          Volatile      (System call number.)
r3          Volatile      (Parameter 1, and return value.)
r4-r8       Volatile      (Parameters 2-6.)
cr0         Volatile      (cr0.SO is the return error condition.)
cr1, cr5-7  Nonvolatile
lr          Nonvolatile
=========== ============= ========================================

--- For the scv 0 instruction, differences with the ELF ABI ---
=========== ============= ========================================
r0          Volatile      (System call number.)
r3          Volatile      (Parameter 1, and return value.)
r4-r8       Volatile      (Parameters 2-6.)

@@ -860,6 +860,18 @@ config PPC_SUBPAGE_PROT

          If unsure, say N here.

config PPC_PROT_SAO_LPAR
        bool "Support PROT_SAO mappings in LPARs"
        depends on PPC_BOOK3S_64
        help
          This option adds support for PROT_SAO mappings from userspace
          inside LPARs on supported CPUs.

          This may cause issues when performing guest migration from
          a CPU that supports SAO to one that does not.

          If unsure, say N here.

config PPC_COPRO_BASE
        bool

@@ -20,13 +20,9 @@
#define _PAGE_RW                (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX               (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED        0x00008 /* kernel access only */

#define _PAGE_CACHE_CTL         0x00030 /* Bits for the folowing cache modes */
        /* No bits set is normal cacheable memory */
        /* 0x00010 unused, is SAO bit on radix POWER9 */
#define _PAGE_SAO               0x00010 /* Strong access order */
#define _PAGE_NON_IDEMPOTENT    0x00020 /* non idempotent memory */
#define _PAGE_TOLERANT          0x00030 /* tolerant memory, cache inhibited */

#define _PAGE_DIRTY             0x00080 /* C: page changed */
#define _PAGE_ACCESSED          0x00100 /* R: page referenced */
/*

@@ -828,6 +824,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
        return hash__set_pte_at(mm, addr, ptep, pte, percpu);
}

#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{

@@ -196,7 +196,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_SPURR                   LONG_ASM_CONST(0x0000000001000000)
#define CPU_FTR_DSCR                    LONG_ASM_CONST(0x0000000002000000)
#define CPU_FTR_VSX                     LONG_ASM_CONST(0x0000000004000000)
// Free                                 LONG_ASM_CONST(0x0000000008000000)
#define CPU_FTR_SAO                     LONG_ASM_CONST(0x0000000008000000)
#define CPU_FTR_CP_USE_DCBTZ            LONG_ASM_CONST(0x0000000010000000)
#define CPU_FTR_UNALIGNED_LD_STD        LONG_ASM_CONST(0x0000000020000000)
#define CPU_FTR_ASYM_SMT                LONG_ASM_CONST(0x0000000040000000)

@@ -441,7 +441,7 @@
        CPU_FTR_MMCRA | CPU_FTR_SMT | \
        CPU_FTR_COHERENT_ICACHE | \
        CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
        CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \
        CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
        CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
        CPU_FTR_CFAR | CPU_FTR_HVMODE | \
        CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX )

@@ -450,7 +450,7 @@
        CPU_FTR_MMCRA | CPU_FTR_SMT | \
        CPU_FTR_COHERENT_ICACHE | \
        CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
        CPU_FTR_DSCR | \
        CPU_FTR_DSCR | CPU_FTR_SAO | \
        CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
        CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
        CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \

@@ -461,7 +461,7 @@
        CPU_FTR_MMCRA | CPU_FTR_SMT | \
        CPU_FTR_COHERENT_ICACHE | \
        CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
        CPU_FTR_DSCR | \
        CPU_FTR_DSCR | CPU_FTR_SAO | \
        CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
        CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
        CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \

@@ -479,7 +479,7 @@
        CPU_FTR_MMCRA | CPU_FTR_SMT | \
        CPU_FTR_COHERENT_ICACHE | \
        CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
        CPU_FTR_DSCR | \
        CPU_FTR_DSCR | CPU_FTR_SAO | \
        CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
        CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
        CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \

@@ -13,20 +13,43 @@
#include <linux/pkeys.h>
#include <asm/cpu_has_feature.h>

#ifdef CONFIG_PPC_MEM_KEYS
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
                unsigned long pkey)
{
        return pkey_to_vmflag_bits(pkey);
#ifdef CONFIG_PPC_MEM_KEYS
        return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey));
#else
        return ((prot & PROT_SAO) ? VM_SAO : 0);
#endif
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)

static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
        return __pgprot(vmflag_to_pte_pkey_bits(vm_flags));
#ifdef CONFIG_PPC_MEM_KEYS
        return (vm_flags & VM_SAO) ?
                __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
                __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
#else
        return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
#endif
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
#endif

static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
                return false;
        if (prot & PROT_SAO) {
                if (!cpu_has_feature(CPU_FTR_SAO))
                        return false;
                if (firmware_has_feature(FW_FEATURE_LPAR) &&
                    !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR))
                        return false;
        }
        return true;
}
#define arch_validate_prot arch_validate_prot

#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_MMAN_H */

@@ -82,6 +82,8 @@
 */
#include <asm/nohash/pte-book3e.h>

#define _PAGE_SAO       0

#define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))

/*

@@ -11,7 +11,7 @@
#include <asm-generic/mman-common.h>


#define PROT_SAO        0x10    /* Unsupported since v5.9 */
#define PROT_SAO        0x10    /* Strong Access Ordering */

#define MAP_RENAME      MAP_ANONYMOUS   /* In SunOS terminology */
#define MAP_NORESERVE   0x40            /* don't reserve swap pages */

@@ -653,7 +653,7 @@ static struct dt_cpu_feature_match __initdata
        {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-utilization-of-resources-register", feat_enable_purr, 0},
        {"no-execute", feat_enable, 0},
        /* strong-access-ordering is unused */
        {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
        {"cache-inhibited-large-page", feat_enable_large_ci, 0},
        {"coprocessor-icswx", feat_enable, 0},
        {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},

@@ -113,6 +113,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
        ld      r11,exception_marker@toc(r2)
        std     r11,-16(r10)            /* "regshere" marker */

BEGIN_FTR_SECTION
        HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

        /*
         * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
         * would clobber syscall parameters. Also we always enter with IRQs

@@ -548,7 +548,7 @@ void notrace restore_math(struct pt_regs *regs)
         * are live for the user thread).
         */
        if ((!(msr & MSR_FP)) && should_restore_fp())
                new_msr |= MSR_FP | current->thread.fpexc_mode;
                new_msr |= MSR_FP;

        if ((!(msr & MSR_VEC)) && should_restore_altivec())
                new_msr |= MSR_VEC;

@@ -559,11 +559,17 @@
        }

        if (new_msr) {
                unsigned long fpexc_mode = 0;

                msr_check_and_set(new_msr);

                if (new_msr & MSR_FP)
                if (new_msr & MSR_FP) {
                        do_restore_fp();

                        // This also covers VSX, because VSX implies FP
                        fpexc_mode = current->thread.fpexc_mode;
                }

                if (new_msr & MSR_VEC)
                        do_restore_altivec();

@@ -572,7 +578,7 @@

                msr_check_and_clear(new_msr);

                regs->msr |= new_msr;
                regs->msr |= new_msr | fpexc_mode;
        }
}
#endif

@@ -232,6 +232,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
                rflags |= HPTE_R_I;
        else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
                rflags |= (HPTE_R_I | HPTE_R_G);
        else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
                rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
        else
                /*
                 * Add memory coherence if cache inhibited is not set

@@ -1557,9 +1557,16 @@ nocheck:
        ret = 0;
 out:
        if (has_branch_stack(event)) {
                power_pmu_bhrb_enable(event);
                cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
                        event->attr.branch_sample_type);
                u64 bhrb_filter = -1;

                if (ppmu->bhrb_filter_map)
                        bhrb_filter = ppmu->bhrb_filter_map(
                                event->attr.branch_sample_type);

                if (bhrb_filter != -1) {
                        cpuhw->bhrb_filter = bhrb_filter;
                        power_pmu_bhrb_enable(event);
                }
        }

        perf_pmu_enable(event->pmu);

@@ -1881,7 +1888,6 @@ static int power_pmu_event_init(struct perf_event *event)
        int n;
        int err;
        struct cpu_hw_events *cpuhw;
        u64 bhrb_filter;

        if (!ppmu)
                return -ENOENT;

@@ -1987,7 +1993,10 @@
        err = power_check_constraints(cpuhw, events, cflags, n + 1);

        if (has_branch_stack(event)) {
                bhrb_filter = ppmu->bhrb_filter_map(
                u64 bhrb_filter = -1;

                if (ppmu->bhrb_filter_map)
                        bhrb_filter = ppmu->bhrb_filter_map(
                                event->attr.branch_sample_type);

                if (bhrb_filter == -1) {

@@ -1289,7 +1289,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem,
        header->misc = 0;

        if (cpu_has_feature(CPU_FTR_ARCH_31)) {
                switch (IMC_TRACE_RECORD_VAL_HVPR(mem->val)) {
                switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
                case 0:/* when MSR HV and PR not set in the trace-record */
                        header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
                        break;

@@ -1297,7 +1297,7 @@
                        header->misc |= PERF_RECORD_MISC_GUEST_USER;
                        break;
                case 2: /* MSR HV is 1 and PR is 0 */
                        header->misc |= PERF_RECORD_MISC_HYPERVISOR;
                        header->misc |= PERF_RECORD_MISC_KERNEL;
                        break;
                case 3: /* MSR HV is 1 and PR is 1 */
                        header->misc |= PERF_RECORD_MISC_USER;

@@ -36,7 +36,7 @@ config PPC_BOOK3S_6xx
        select PPC_HAVE_PMU_SUPPORT
        select PPC_HAVE_KUEP
        select PPC_HAVE_KUAP
        select HAVE_ARCH_VMAP_STACK
        select HAVE_ARCH_VMAP_STACK if !ADB_PMU

config PPC_BOOK3S_601
        bool "PowerPC 601"

@@ -1223,7 +1223,7 @@ static void __init pnv_probe_idle_states(void)
                return;
        }

        if (pvr_version_is(PVR_POWER9))
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                pnv_power9_idle_init();

        for (i = 0; i < nr_pnv_idle_states; i++)

@@ -49,6 +49,8 @@
#include <linux/cuda.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
#endif
#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif

@@ -324,6 +324,8 @@ extern unsigned int kobjsize(const void *objp);

#if defined(CONFIG_X86)
# define VM_PAT         VM_ARCH_1       /* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO         VM_ARCH_1       /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP     VM_ARCH_1
#elif defined(CONFIG_IA64)

@@ -114,6 +114,8 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )

#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT,     "pat"     }
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO,     "sao"     }
#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)

mm/ksm.c
@@ -2453,6 +2453,10 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
        if (vma_is_dax(vma))
                return 0;

#ifdef VM_SAO
        if (*vm_flags & VM_SAO)
                return 0;
#endif
#ifdef VM_SPARC_ADI
        if (*vm_flags & VM_SPARC_ADI)
                return 0;

@@ -2,6 +2,7 @@
hugetlb_vs_thp_test
subpage_prot
tempfile
prot_sao
segv_errors
wild_bctr
large_vm_fork_separation

@@ -2,7 +2,7 @@
noarg:
        $(MAKE) -C ../

TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \
TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
                  large_vm_fork_separation bad_accesses pkey_exec_prot \
                  pkey_siginfo stack_expansion_signal stack_expansion_ldst

@@ -14,6 +14,8 @@ include ../../lib.mk

$(TEST_GEN_PROGS): ../harness.c ../utils.c

$(OUTPUT)/prot_sao: ../utils.c

$(OUTPUT)/wild_bctr: CFLAGS += -m64
$(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
$(OUTPUT)/bad_accesses: CFLAGS += -m64

@@ -0,0 +1,43 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016, Michael Ellerman, IBM Corp.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#include <asm/cputable.h>

#include "utils.h"

#define SIZE (64 * 1024)

int test_prot_sao(void)
{
        char *p;

        /* SAO was introduced in 2.06 and removed in 3.1 */
        SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06) ||
                have_hwcap2(PPC_FEATURE2_ARCH_3_1));

        /*
         * Ensure we can ask for PROT_SAO.
         * We can't really verify that it does the right thing, but at least we
         * confirm the kernel will accept it.
         */
        p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO,
                 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        FAIL_IF(p == MAP_FAILED);

        /* Write to the mapping, to at least cause a fault */
        memset(p, 0xaa, SIZE);

        return 0;
}

int main(void)
{
        return test_harness(test_prot_sao, "prot-sao");
}