arm64: Add CONFIG_DEBUG_SET_MODULE_RONX support
In a similar fashion to other architectures, add the infrastructure and
Kconfig to enable DEBUG_SET_MODULE_RONX support. When enabled, module
ranges will be marked read-only/no-execute as appropriate.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
[will: fixed off-by-one in module end check]
Signed-off-by: Will Deacon <will.deacon@arm.com>
Parent: b6d4f2800b
Commit: 11d91a770f
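For context (and purely as an illustration, not code from this patch): with CONFIG_DEBUG_SET_MODULE_RONX enabled, the generic module loader is expected to drive the new set_memory_* hooks on each module's sections after loading. The helper and its section arguments below are hypothetical, sketching only how the API is meant to be used:

/*
 * Hypothetical sketch (not the real kernel/module.c code): how a
 * caller such as the module loader could use the new hooks to
 * protect a freshly loaded module's sections.
 */
#include <asm/cacheflush.h>	/* set_memory_ro()/set_memory_nx() added by this patch */

static void protect_module_sections(unsigned long text, int text_pages,
				    unsigned long rodata, int rodata_pages,
				    unsigned long data, int data_pages)
{
	set_memory_ro(text, text_pages);	/* text: executable, read-only */
	set_memory_ro(rodata, rodata_pages);	/* rodata: read-only... */
	set_memory_nx(rodata, rodata_pages);	/* ...and never executable */
	set_memory_nx(data, data_pages);	/* data: writable but non-executable */
}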
arch/arm64/Kconfig.debug
@@ -43,4 +43,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
 	  of TEXT_OFFSET and platforms must not require a specific
 	  value.
 
+config DEBUG_SET_MODULE_RONX
+	bool "Set loadable kernel module data as NX and text as RO"
+	depends on MODULES
+	help
+	  This option helps catch unintended modifications to loadable
+	  kernel module's text and read-only data. It also prevents execution
+	  of module data. Such protection may interfere with run-time code
+	  patching and dynamic kernel tracing - and they might also protect
+	  against certain classes of kernel exploits.
+	  If in doubt, say "N".
+
 endmenu
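The protection is opt-in and depends on MODULES. A kernel configuration fragment turning it on would simply be (illustrative):

CONFIG_MODULES=y
CONFIG_DEBUG_SET_MODULE_RONX=y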
arch/arm64/include/asm/cacheflush.h
@@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
 
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
 #endif
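These prototypes mirror the set_memory_* interface other architectures already export from their cacheflush.h. As a hedged example of a possible consumer (the function below is hypothetical and not part of this patch), a code-patching path could briefly drop the read-only protection on one page of module text and then restore it:

/*
 * Hypothetical consumer of the new API: temporarily make one page of
 * module text writable, patch a single instruction word, then restore
 * the read-only protection.  Error handling and locking are trimmed.
 */
static int patch_module_insn(unsigned long addr, u32 insn)
{
	unsigned long page = addr & PAGE_MASK;
	int ret;

	ret = set_memory_rw(page, 1);		/* one page back to RW */
	if (ret)
		return ret;

	*(u32 *)addr = insn;			/* write the new instruction */
	flush_icache_range(addr, addr + sizeof(insn));

	return set_memory_ro(page, 1);		/* re-protect the page */
}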
arch/arm64/mm/Makefile
@@ -1,5 +1,5 @@
 obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   cache.o copypage.o flush.o \
 				   ioremap.o mmap.o pgd.o mmu.o \
-				   context.o proc.o
+				   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
arch/arm64/mm/pageattr.c (new file)
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page_change_data {
+	pgprot_t set_mask;
+	pgprot_t clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+			void *data)
+{
+	struct page_change_data *cdata = data;
+	pte_t pte = *ptep;
+
+	pte = clear_pte_bit(pte, cdata->clear_mask);
+	pte = set_pte_bit(pte, cdata->set_mask);
+
+	set_pte(ptep, pte);
+	return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+				pgprot_t set_mask, pgprot_t clear_mask)
+{
+	unsigned long start = addr;
+	unsigned long size = PAGE_SIZE*numpages;
+	unsigned long end = start + size;
+	int ret;
+	struct page_change_data data;
+
+	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
+		addr &= PAGE_MASK;
+		WARN_ON_ONCE(1);
+	}
+
+	if (!is_module_address(start) || !is_module_address(end - 1))
+		return -EINVAL;
+
+	data.set_mask = set_mask;
+	data.clear_mask = clear_mask;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+					&data);
+
+	flush_tlb_kernel_range(start, end);
+	return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(PTE_RDONLY),
+					__pgprot(PTE_WRITE));
+}
+EXPORT_SYMBOL_GPL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(PTE_WRITE),
+					__pgprot(PTE_RDONLY));
+}
+EXPORT_SYMBOL_GPL(set_memory_rw);
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(PTE_PXN),
+					__pgprot(0));
+}
+EXPORT_SYMBOL_GPL(set_memory_nx);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+	return change_memory_common(addr, numpages,
+					__pgprot(0),
+					__pgprot(PTE_PXN));
+}
+EXPORT_SYMBOL_GPL(set_memory_x);
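Because change_memory_common() deliberately rejects addresses outside the module area (the is_module_address() checks, including the end - 1 fix noted in the commit message), a simple way to exercise the new code end to end is from a module itself. The smoke test below is purely illustrative and not part of the patch:

/*
 * Illustrative out-of-tree smoke test: flip one of this module's own
 * data pages to read-only and back again.  Not part of the patch.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static char ronx_test_buf[PAGE_SIZE] __aligned(PAGE_SIZE);

static int __init ronx_test_init(void)
{
	unsigned long addr = (unsigned long)ronx_test_buf;
	int ret;

	ret = set_memory_ro(addr, 1);	/* a write to ronx_test_buf would now fault */
	if (ret)
		return ret;

	return set_memory_rw(addr, 1);	/* restore so the module keeps working */
}

static void __exit ronx_test_exit(void)
{
}

module_init(ronx_test_init);
module_exit(ronx_test_exit);
MODULE_LICENSE("GPL");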