/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 * written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
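
/*
 * MIPS d-caches can be virtually indexed, so two virtual mappings of the
 * same physical page may alias in the cache unless they are congruent
 * modulo the alias boundary. shm_align_mask records that boundary
 * (PAGE_SIZE - 1 by default, i.e. no constraint beyond page alignment);
 * cache setup code may raise it, and shared mappings below are then
 * "colour"-aligned against it.
 */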
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
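
/*
 * Worked example (illustrative numbers, not taken from this file): with
 * TASK_SIZE == 2 GB, an 8 MB RLIMIT_STACK and rnd == 0, the 8 MB gap is
 * clamped up to MIN_GAP, so mmap_base() returns PAGE_ALIGN(2 GB - 128 MB),
 * keeping at least 128 MB free between the top-down mmap area and the stack.
 */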

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
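
/*
 * Illustration (hypothetical values): with 4 KB pages and
 * shm_align_mask == 0xffff (a 64 KB alias boundary),
 * COLOUR_ALIGN(0x12345, 2) rounds 0x12345 up to 0x20000 and adds
 * (2 << 12) & 0xffff == 0x2000, giving 0x22000, an address with the
 * same cache colour as a file offset of two pages.
 */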

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
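		/*
		 * vm_start_gap() rather than vma->vm_start: the hint is
		 * only usable if the mapping also stays clear of the stack
		 * guard gap maintained below a downward-growing neighbour.
		 */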
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
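
	/*
	 * No usable hint; search for a free range. With colouring enabled,
	 * align_mask/align_offset make the returned address congruent to
	 * the file offset modulo the alias boundary (PAGE_MASK &
	 * shm_align_mask keeps only the page-number bits of the mask).
	 */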
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);
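
		/*
		 * vm_unmapped_area() returns either a page-aligned address
		 * or a negative errno, which always has low bits set, so a
		 * page-aligned result means success.
		 */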
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this, but sched.h declares the function as
 * extern, so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_COMPAT
	if (TASK_IS_32BIT_ADDR)
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_COMPAT */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
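
/*
 * For illustration (the width depends on CONFIG_ARCH_MMAP_RND_BITS): with
 * mmap_rnd_bits == 8 and 4 KB pages, arch_mmap_rnd() spans 2^8 pages,
 * i.e. the mmap base is randomized within a window of up to 1 MB.
 */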

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
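
/*
 * Heap (brk) randomization: offset the program break by a random,
 * page-aligned amount so its placement is not predictable across runs.
 */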

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_long();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}
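
/*
 * Note the order in brk_rnd(): the value is shifted to page granularity
 * before masking, so 0x7fffff / 0xfffffff bound the byte offset at just
 * under 8 MB (32-bit) or 256 MB (64-bit), in whole pages.
 */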

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());
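
	/* Guard against wraparound; never move the break downwards. */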
	if (ret < mm->brk)
		return mm->brk;

	return ret;
}
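
/*
 * A kernel virtual address is considered valid if it translates to a
 * physical page frame that actually exists (its pfn passes pfn_valid()).
 */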
int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);