x86/mm/pkeys: Optimize fault handling in access_error()
We might not strictly have to make modifications to access_error()
to check the VMA here, but without that check, this is what happens:

 1. app sets VMA pkey to K
 2. app touches a !present page
 3. do_page_fault() allocates and maps the page, sets pte.pkey=K
 4. return to userspace
 5. touch instruction reexecutes, but triggers PF_PK
 6. do PKEY signal

What happens with this patch applied:

 1. app sets VMA pkey to K
 2. app touches a !present page
 3. do_page_fault() notices that K is inaccessible
 4. do PKEY signal

We basically skip the fault that does an allocation.

So what this lets us do is protect areas from even being *populated*
unless they are accessible according to protection keys.  That seems
handy to me and makes protection keys work more like an mprotect()'d
mapping.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210222.EBB63D8C@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
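For illustration only (not part of the patch): a minimal userspace sketch of
the scenario described above, assuming a CPU with OSPKE and a glibc that
exposes pkey_alloc()/pkey_mprotect(). With this patch, the very first touch of
the never-populated page should deliver the pkey SIGSEGV directly, without the
kernel first allocating and mapping a page for the VMA.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);

        /* Anonymous mapping; no pages are populated yet. */
        char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (buf == MAP_FAILED)
                return 1;

        /* Allocate a protection key with all access disabled. */
        int pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);
        if (pkey < 0)
                return 1;

        /* Attach the key to the (still !present) VMA. */
        if (pkey_mprotect(buf, page, PROT_READ | PROT_WRITE, pkey))
                return 1;

        /*
         * This faults on a !present page.  With the patch, the fault
         * handler sees that the VMA's pkey forbids access and raises
         * the PKEY signal immediately; no page is allocated for the
         * mapping.
         */
        buf[0] = 1;     /* expected to die with SIGSEGV (SEGV_PKUERR) */

        printf("unexpectedly survived the write\n");
        return 0;
}

Without the patch the end result is the same signal, but only after an extra
fault that populates the page first.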
Parent: 1b2ee1266e
Commit: 07f146f53e
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -900,10 +900,16 @@ bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 static inline bool bad_area_access_from_pkeys(unsigned long error_code,
                struct vm_area_struct *vma)
 {
+       /* This code is always called on the current mm */
+       bool foreign = false;
+
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return false;
        if (error_code & PF_PK)
                return true;
+       /* this checks permission keys on the VMA: */
+       if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign))
+               return true;
        return false;
 }
 
@@ -1091,6 +1097,8 @@ int show_unhandled_signals = 1;
 static inline int
 access_error(unsigned long error_code, struct vm_area_struct *vma)
 {
+       /* This is only called for the current mm, so: */
+       bool foreign = false;
        /*
         * Access or read was blocked by protection keys.  We do
         * this check before any others because we do not want
@@ -1099,6 +1107,13 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
         */
        if (error_code & PF_PK)
                return 1;
+       /*
+        * Make sure to check the VMA so that we do not perform
+        * faults just to hit a PF_PK as soon as we fill in a
+        * page.
+        */
+       if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), foreign))
+               return 1;
 
        if (error_code & PF_WRITE) {
                /* write, present and write, not present: */