mm/kasan: get rid of speculative shadow checks

For some unaligned memory accesses we have to check an additional byte of
the shadow memory.  Currently we load that byte speculatively to have
only a single load + branch on the optimistic fast path.
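
For context, generic KASAN maps every 8-byte granule of memory to one shadow
byte, so an unaligned multi-byte access can spill into a second shadow byte.
Below is a minimal user-space sketch of that mapping; the helper name and the
shadow offset constant are invented for illustration (the kernel uses
kasan_mem_to_shadow() and KASAN_SHADOW_OFFSET), only the shift by 3 matches
the real code.

/*
 * Toy illustration of the KASAN shadow mapping (1 shadow byte per 8 bytes
 * of memory); user-space, not kernel code.
 */
#include <stdio.h>

#define SHADOW_SCALE_SHIFT	3		/* 8 bytes per shadow byte */
#define SHADOW_OFFSET		0x100000UL	/* made up for the example */

static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	/*
	 * A 2-byte access at 0x1007 touches bytes 0x1007 and 0x1008, which
	 * sit in different 8-byte granules, so two shadow bytes describe it.
	 * The fast path removed by this patch read both of them with one
	 * unaligned u16 load.
	 */
	printf("shadow of 0x1007: %#lx\n", mem_to_shadow(0x1007));
	printf("shadow of 0x1008: %#lx\n", mem_to_shadow(0x1008));
	return 0;
}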

However, this approach has some downsides:

 - It's an unaligned access, so it prevents porting KASAN to
   architectures which don't support unaligned accesses.

 - We have to map an additional shadow page to prevent a crash if the
   speculative load happens near the end of the mapped memory. This would
   significantly complicate the upcoming memory hotplug support.

I wasn't able to notice any performance degradation with this patch.  So
these speculative loads are just a pain with no gain; let's remove them.

Link: http://lkml.kernel.org/r/20170601162338.23540-1-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Andrey Ryabinin 2017-07-10 15:50:24 -07:00, committed by Linus Torvalds
Parent: 458f7920f9
Commit: c634d807d9
1 changed file, 16 additions and 82 deletions

@@ -134,94 +134,30 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
 	return false;
 }
 
-static __always_inline bool memory_is_poisoned_2(unsigned long addr)
+static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
+						unsigned long size)
 {
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 1))
-			return true;
-
-		/*
-		 * If single shadow byte covers 2-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
-			return false;
-
-		return unlikely(*(u8 *)shadow_addr);
-	}
-
-	return false;
-}
-
-static __always_inline bool memory_is_poisoned_4(unsigned long addr)
-{
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 3))
-			return true;
-
-		/*
-		 * If single shadow byte covers 4-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
-			return false;
-
-		return unlikely(*(u8 *)shadow_addr);
-	}
-
-	return false;
-}
-
-static __always_inline bool memory_is_poisoned_8(unsigned long addr)
-{
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 7))
-			return true;
-
-		/*
-		 * If single shadow byte covers 8-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-			return false;
-
-		return unlikely(*(u8 *)shadow_addr);
-	}
-
-	return false;
+	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
+
+	/*
+	 * Access crosses 8(shadow size)-byte boundary. Such access maps
+	 * into 2 shadow bytes, so we need to check them both.
+	 */
+	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
+
+	return memory_is_poisoned_1(addr + size - 1);
 }
 
 static __always_inline bool memory_is_poisoned_16(unsigned long addr)
 {
-	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		u16 shadow_first_bytes = *(u16 *)shadow_addr;
-
-		if (unlikely(shadow_first_bytes))
-			return true;
-
-		/*
-		 * If two shadow bytes covers 16-byte access, we don't
-		 * need to do anything more. Otherwise, test the last
-		 * shadow byte.
-		 */
-		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-			return false;
-
-		return memory_is_poisoned_1(addr + 15);
-	}
-
-	return false;
+	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+
+	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
+	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+		return *shadow_addr || memory_is_poisoned_1(addr + 15);
+
+	return *shadow_addr;
 }
 
 static __always_inline unsigned long bytes_is_zero(const u8 *start,
@@ -292,11 +228,9 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
 		case 1:
 			return memory_is_poisoned_1(addr);
 		case 2:
-			return memory_is_poisoned_2(addr);
 		case 4:
-			return memory_is_poisoned_4(addr);
 		case 8:
-			return memory_is_poisoned_8(addr);
+			return memory_is_poisoned_2_4_8(addr, size);
 		case 16:
 			return memory_is_poisoned_16(addr);
 		default:
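
For anyone who wants to convince themselves of the boundary test used in the
new memory_is_poisoned_2_4_8(), here is a small stand-alone sketch (user-space,
not kernel code; the helper name is invented and KASAN_SHADOW_MASK is
re-derived from the 8-byte granule size):

/*
 * Stand-alone check of the crossing condition from the hunk above.
 */
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SIZE	8UL
#define KASAN_SHADOW_MASK	(KASAN_SHADOW_SCALE_SIZE - 1)

/* True when the access spans two 8-byte granules, i.e. two shadow bytes. */
static int crosses_shadow_byte(unsigned long addr, unsigned long size)
{
	return ((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1;
}

int main(void)
{
	printf("%d\n", crosses_shadow_byte(0x1000, 8));	/* 0: aligned, one shadow byte */
	printf("%d\n", crosses_shadow_byte(0x1006, 2));	/* 0: bytes 0x1006-0x1007, same granule */
	printf("%d\n", crosses_shadow_byte(0x1007, 2));	/* 1: bytes 0x1007-0x1008, two granules */
	printf("%d\n", crosses_shadow_byte(0x1005, 4));	/* 1: bytes 0x1005-0x1008, two granules */
	return 0;
}

The second shadow byte is needed only when the last byte of the access lands
in a different 8-byte granule than the first one, which is exactly what
((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1 detects; in every other
case a single shadow byte load is enough, so no speculative second load is
required.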