s390: avoid cache aliasing under z/VM and KVM
Commit 1f6b83e5e4 ("s390: avoid z13 cache aliasing") checks for the
machine type to optimize address space randomization and zero page
allocation to avoid cache aliases.
This check might fail under a hypervisor with migration support.
z/VM's "Single System Image and Live Guest Relocation" facility will
"fake" the machine type of the oldest system in the group. For example
in a group of zEC12 and z13 machines the guest appears to run on a zEC12
(architecture fencing within the relocation domain).
Remove the machine type detection and always use cache aliasing
rules that are known to work for all machines. These are the z13
aliasing rules.
Suggested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
Parent
f07f21b3e2
Commit
c7e8b2c21c
|
@ -206,9 +206,16 @@ do { \
|
|||
} while (0)
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
extern unsigned long mmap_rnd_mask;
|
||||
|
||||
#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
|
||||
/*
|
||||
* Cache aliasing on the latest machines calls for a mapping granularity
|
||||
* of 512KB. For 64-bit processes use a 512KB alignment and a randomization
|
||||
* of up to 1GB. For 31-bit processes the virtual address space is limited,
|
||||
* use no alignment and limit the randomization to 8MB.
|
||||
*/
|
||||
#define BRK_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ffffUL)
|
||||
#define MMAP_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ff80UL)
|
||||
#define MMAP_ALIGN_MASK (is_32bit_task() ? 0 : 0x7fUL)
|
||||
#define STACK_RND_MASK MMAP_RND_MASK
|
||||
|
||||
#define ARCH_DLINFO \
|
||||
do { \
|
||||
|
|
|
@ -243,11 +243,7 @@ unsigned long arch_align_stack(unsigned long sp)
|
|||
|
||||
static inline unsigned long brk_rnd(void)
|
||||
{
|
||||
/* 8MB for 32bit, 1GB for 64bit */
|
||||
if (is_32bit_task())
|
||||
return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
|
||||
else
|
||||
return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
|
||||
return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
unsigned long arch_randomize_brk(struct mm_struct *mm)
|
||||
|
|
|
@ -48,37 +48,13 @@ EXPORT_SYMBOL(zero_page_mask);
|
|||
|
||||
static void __init setup_zero_pages(void)
|
||||
{
|
||||
struct cpuid cpu_id;
|
||||
unsigned int order;
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
get_cpu_id(&cpu_id);
|
||||
switch (cpu_id.machine) {
|
||||
case 0x9672: /* g5 */
|
||||
case 0x2064: /* z900 */
|
||||
case 0x2066: /* z900 */
|
||||
case 0x2084: /* z990 */
|
||||
case 0x2086: /* z990 */
|
||||
case 0x2094: /* z9-109 */
|
||||
case 0x2096: /* z9-109 */
|
||||
order = 0;
|
||||
break;
|
||||
case 0x2097: /* z10 */
|
||||
case 0x2098: /* z10 */
|
||||
case 0x2817: /* z196 */
|
||||
case 0x2818: /* z196 */
|
||||
order = 2;
|
||||
break;
|
||||
case 0x2827: /* zEC12 */
|
||||
case 0x2828: /* zEC12 */
|
||||
order = 5;
|
||||
break;
|
||||
case 0x2964: /* z13 */
|
||||
default:
|
||||
order = 7;
|
||||
break;
|
||||
}
|
||||
/* Latest machines require a mapping granularity of 512KB */
|
||||
order = 7;
|
||||
|
||||
/* Limit number of empty zero pages for small memory sizes */
|
||||
while (order > 2 && (totalram_pages >> 10) < (1UL << order))
|
||||
order--;
|
||||
|
|
|
@ -31,9 +31,6 @@
|
|||
#include <linux/security.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
||||
unsigned long mmap_rnd_mask;
|
||||
static unsigned long mmap_align_mask;
|
||||
|
||||
static unsigned long stack_maxrandom_size(void)
|
||||
{
|
||||
if (!(current->flags & PF_RANDOMIZE))
|
||||
|
@ -62,10 +59,7 @@ static inline int mmap_is_legacy(void)
|
|||
|
||||
unsigned long arch_mmap_rnd(void)
|
||||
{
|
||||
if (is_32bit_task())
|
||||
return (get_random_int() & 0x7ff) << PAGE_SHIFT;
|
||||
else
|
||||
return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
|
||||
return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static unsigned long mmap_base_legacy(unsigned long rnd)
|
||||
|
@ -92,7 +86,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|||
struct mm_struct *mm = current->mm;
|
||||
struct vm_area_struct *vma;
|
||||
struct vm_unmapped_area_info info;
|
||||
int do_color_align;
|
||||
|
||||
if (len > TASK_SIZE - mmap_min_addr)
|
||||
return -ENOMEM;
|
||||
|
@ -108,15 +101,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|||
return addr;
|
||||
}
|
||||
|
||||
do_color_align = 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
do_color_align = !is_32bit_task();
|
||||
|
||||
info.flags = 0;
|
||||
info.length = len;
|
||||
info.low_limit = mm->mmap_base;
|
||||
info.high_limit = TASK_SIZE;
|
||||
info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
|
||||
else
|
||||
info.align_mask = 0;
|
||||
info.align_offset = pgoff << PAGE_SHIFT;
|
||||
return vm_unmapped_area(&info);
|
||||
}
|
||||
|
@ -130,7 +122,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|||
struct mm_struct *mm = current->mm;
|
||||
unsigned long addr = addr0;
|
||||
struct vm_unmapped_area_info info;
|
||||
int do_color_align;
|
||||
|
||||
/* requested length too big for entire address space */
|
||||
if (len > TASK_SIZE - mmap_min_addr)
|
||||
|
@ -148,15 +139,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|||
return addr;
|
||||
}
|
||||
|
||||
do_color_align = 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
do_color_align = !is_32bit_task();
|
||||
|
||||
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
|
||||
info.length = len;
|
||||
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
|
||||
info.high_limit = mm->mmap_base;
|
||||
info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
|
||||
else
|
||||
info.align_mask = 0;
|
||||
info.align_offset = pgoff << PAGE_SHIFT;
|
||||
addr = vm_unmapped_area(&info);
|
||||
|
||||
|
@ -254,35 +244,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
|||
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
|
||||
}
|
||||
}
|
||||
|
||||
static int __init setup_mmap_rnd(void)
|
||||
{
|
||||
struct cpuid cpu_id;
|
||||
|
||||
get_cpu_id(&cpu_id);
|
||||
switch (cpu_id.machine) {
|
||||
case 0x9672:
|
||||
case 0x2064:
|
||||
case 0x2066:
|
||||
case 0x2084:
|
||||
case 0x2086:
|
||||
case 0x2094:
|
||||
case 0x2096:
|
||||
case 0x2097:
|
||||
case 0x2098:
|
||||
case 0x2817:
|
||||
case 0x2818:
|
||||
case 0x2827:
|
||||
case 0x2828:
|
||||
mmap_rnd_mask = 0x7ffUL;
|
||||
mmap_align_mask = 0UL;
|
||||
break;
|
||||
case 0x2964: /* z13 */
|
||||
default:
|
||||
mmap_rnd_mask = 0x3ff80UL;
|
||||
mmap_align_mask = 0x7fUL;
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
early_initcall(setup_mmap_rnd);
|
||||
|
|
Loading…
Reference in a new issue