nommu: add support for Memory Protection Units (MPU)
Some architectures (like the Blackfin arch) implement some of the "simpler"
features that one would expect out of an MMU, such as memory protection.  In
our case we actually get read/write/exec protection down to the page
boundary, so processes can't stomp on each other, let alone on the kernel.

There is a performance decrease (which depends greatly on the workload),
however, as the hardware/software interaction was not optimized at design
time.

Signed-off-by: Bernd Schmidt <bernds_cb1@t-online.de>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Acked-by: David Howells <dhowells@redhat.com>
Acked-by: Greg Ungerer <gerg@snapgear.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 02e87d1a93
Commit: eb8cdec4a9
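With CONFIG_MPU enabled, the generic nommu code added below relies on two arch-provided hooks, protect_page() and update_protections(), which is presumably why both kernel/module.c and mm/nommu.c gain #include <asm/mmu_context.h>. As a rough stand-alone illustration (not part of this patch), the sketch below mimics the page-granular walk that the new protect_vma() helper performs; the PAGE_SIZE/PAGE_MASK values, the example VMA bounds, and the stub protect_page() are assumptions for the example only.

/*
 * Stand-alone sketch of the page walk done by protect_vma().
 * The constants and the stub hook are assumptions for illustration;
 * in the kernel the real hooks come from the arch's <asm/mmu_context.h>.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* stand-in for the per-page arch hook called by protect_vma() */
static void protect_page(unsigned long addr, unsigned long flags)
{
	printf("protect page 0x%08lx, flags 0x%lx\n", addr, flags);
}

int main(void)
{
	/* hypothetical VMA bounds and protection flags */
	unsigned long vm_start = 0x00100000UL;
	unsigned long vm_end   = 0x00103000UL;
	unsigned long flags    = 0x5;		/* e.g. read + exec */

	/* same loop shape as protect_vma() in the mm/nommu.c hunk below */
	unsigned long start = vm_start & PAGE_MASK;

	while (start < vm_end) {
		protect_page(start, flags);
		start += PAGE_SIZE;
	}
	/*
	 * In the kernel, update_protections(mm) is then called once so the
	 * accumulated per-mm settings are pushed into the MPU hardware.
	 */
	return 0;
}

Note how the per-page hook only records the desired protection, while the single update_protections() call at the end applies the whole set at once per VMA change.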
@@ -47,6 +47,7 @@
 #include <linux/rculist.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
 #include <linux/license.h>
 #include <asm/sections.h>
 #include <linux/tracepoint.h>
@@ -1535,6 +1536,10 @@ static void free_module(struct module *mod)
 	/* Finally, free the core (containing the module structure) */
 	module_free(mod, mod->module_core);
 
+#ifdef CONFIG_MPU
+	update_protections(current->mm);
+#endif
+
 }
 
 void *__symbol_get(const char *symbol)
 mm/nommu.c | 21 +++++++++++++++++++++
@@ -33,6 +33,7 @@
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 #include "internal.h"
 
 static inline __attribute__((format(printf, 1, 2)))
@@ -622,6 +623,22 @@ static void put_nommu_region(struct vm_region *region)
 	__put_nommu_region(region);
 }
 
+/*
+ * update protection on a vma
+ */
+static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
+{
+#ifdef CONFIG_MPU
+	struct mm_struct *mm = vma->vm_mm;
+	long start = vma->vm_start & PAGE_MASK;
+	while (start < vma->vm_end) {
+		protect_page(mm, start, flags);
+		start += PAGE_SIZE;
+	}
+	update_protections(mm);
+#endif
+}
+
 /*
  * add a VMA into a process's mm_struct in the appropriate place in the list
  * and tree and add to the address space's page tree also if not an anonymous
@@ -641,6 +658,8 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	mm->map_count++;
 	vma->vm_mm = mm;
 
+	protect_vma(vma, vma->vm_flags);
+
 	/* add the VMA to the mapping */
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
@@ -703,6 +722,8 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
 	kenter("%p", vma);
 
+	protect_vma(vma, 0);
+
 	mm->map_count--;
 	if (mm->mmap_cache == vma)
 		mm->mmap_cache = NULL;