x86/gart: Exclude GART aperture from kcore
On machines where the GART aperture is mapped over physical RAM, /proc/kcore
contains the GART aperture range. Accessing the GART range via /proc/kcore
results in a kernel crash.

vmcore used to have the same issue, until it was fixed with commit
2a3e83c6f9 ("x86/gart: Exclude GART aperture from vmcore"), which leveraged
the existing hook infrastructure in vmcore to let /proc/vmcore return zeroes
when attempting to read the aperture region, so that it does not read from
the actual memory.
Apply the same workaround for kcore: first implement the same hook
infrastructure for kcore, then reuse the hook functions introduced in the
previous vmcore fix, with minor adjustments: rename some functions for more
general usage and simplify the hook infrastructure a bit, since there are no
module users yet.
Suggested-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Kairui Song <kasong@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Jiri Bohac <jbohac@suse.cz>
Acked-by: Baoquan He <bhe@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Omar Sandoval <osandov@fb.com>
Cc: Dave Young <dyoung@redhat.com>
Link: https://lkml.kernel.org/r/20190308030508.13548-1-kasong@redhat.com
Parent: f7798711ad
Commit: ffc8599aa9
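For reference, below is a minimal, self-contained userspace model of the hook
pattern this patch wires into kcore. It is a sketch for illustration only, not
kernel code: the function names mirror those added by the patch, but the pfn
values and the main() driver are made up. A single optional callback decides
whether a pfn is backed by readable RAM, and when it is not, the reader
zero-fills instead of touching the memory.

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Optional hook; when unset, every pfn is treated as RAM (the default). */
static int (*mem_pfn_is_ram)(unsigned long pfn);

/* Mirrors register_mem_pfn_is_ram(): only one callback may be installed. */
static int register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
	return mem_pfn_is_ram ? mem_pfn_is_ram(pfn) : 1;
}

/* Stand-in for gart_mem_pfn_is_ram(): exclude an aperture pfn range.
 * The range below is arbitrary, chosen only for the demo. */
static unsigned long aperture_pfn_start = 0x100, aperture_page_count = 0x10;

static int gart_mem_pfn_is_ram(unsigned long pfn)
{
	return pfn < aperture_pfn_start ||
	       pfn >= aperture_pfn_start + aperture_page_count;
}

int main(void)
{
	char buf[8];

	register_mem_pfn_is_ram(gart_mem_pfn_is_ram);

	/* A reader would zero-fill instead of touching excluded memory. */
	if (!pfn_is_ram(0x105))
		memset(buf, 0, sizeof(buf));

	printf("pfn 0x0ff is RAM: %d\n", pfn_is_ram(0x0ff)); /* prints 1 */
	printf("pfn 0x105 is RAM: %d\n", pfn_is_ram(0x105)); /* prints 0 */
	return 0;
}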
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) "AGP: " fmt
 
 #include <linux/kernel.h>
+#include <linux/kcore.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/memblock.h>
@@ -57,7 +58,7 @@ int fallback_aper_force __initdata;
 
 int fix_aperture __initdata = 1;
 
-#ifdef CONFIG_PROC_VMCORE
+#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
 /*
  * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
  * use the same range because it will remain configured in the northbridge.
@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1;
  */
 static unsigned long aperture_pfn_start, aperture_page_count;
 
-static int gart_oldmem_pfn_is_ram(unsigned long pfn)
+static int gart_mem_pfn_is_ram(unsigned long pfn)
 {
 	return likely((pfn < aperture_pfn_start) ||
 		      (pfn >= aperture_pfn_start + aperture_page_count));
 }
 
-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+static void __init exclude_from_core(u64 aper_base, u32 aper_order)
 {
 	aperture_pfn_start = aper_base >> PAGE_SHIFT;
 	aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
-	WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
+#ifdef CONFIG_PROC_VMCORE
+	WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram));
+#endif
+#ifdef CONFIG_PROC_KCORE
+	WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram));
+#endif
 }
 #else
-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+static void exclude_from_core(u64 aper_base, u32 aper_order)
 {
 }
 #endif
@@ -474,7 +480,7 @@ out:
 			 * may have allocated the range over its e820 RAM
 			 * and fixed up the northbridge
 			 */
-			exclude_from_vmcore(last_aper_base, last_aper_order);
+			exclude_from_core(last_aper_base, last_aper_order);
 
 			return 1;
 		}
@@ -520,7 +526,7 @@ out:
 	 * overlap with the first kernel's memory. We can't access the
 	 * range through vmcore even though it should be part of the dump.
 	 */
-	exclude_from_vmcore(aper_alloc, aper_order);
+	exclude_from_core(aper_alloc, aper_order);
 
 	/* Fix up the north bridges */
 	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head);
 static DECLARE_RWSEM(kclist_lock);
 static int kcore_need_update = 1;
 
+/*
+ * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
+ * Same as oldmem_pfn_is_ram in vmcore
+ */
+static int (*mem_pfn_is_ram)(unsigned long pfn);
+
+int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
+{
+	if (mem_pfn_is_ram)
+		return -EBUSY;
+	mem_pfn_is_ram = fn;
+	return 0;
+}
+
+static int pfn_is_ram(unsigned long pfn)
+{
+	if (mem_pfn_is_ram)
+		return mem_pfn_is_ram(pfn);
+	else
+		return 1;
+}
+
 /* This doesn't grab kclist_lock, so it should only be used at init time. */
 void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
 		       int type)
@@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 				goto out;
 			}
 			m = NULL;	/* skip the list anchor */
+		} else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
+			if (clear_user(buffer, tsz)) {
+				ret = -EFAULT;
+				goto out;
+			}
 		} else if (m->type == KCORE_VMALLOC) {
 			vread(buf, (char *)start, tsz);
 			/* we have to zero-fill user buffer even if no read */
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -44,6 +44,8 @@ void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
 	m->vaddr = (unsigned long)vaddr;
 	kclist_add(m, addr, sz, KCORE_REMAP);
 }
+
+extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
 #else
 static inline
 void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
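A quick way to exercise the read path after the change is simply to read from
/proc/kcore. The snippet below is an illustrative check, not part of the
patch, and needs root: it only verifies that the file opens and starts with
the ELF magic, since /proc/kcore is exposed as an ELF core file. It does not
target the aperture range specifically, which would require knowing the
aperture's physical address on an affected machine.

#include <stdio.h>

int main(void)
{
	unsigned char ehdr[16];
	FILE *f = fopen("/proc/kcore", "rb");

	if (!f || fread(ehdr, 1, sizeof(ehdr), f) != sizeof(ehdr)) {
		perror("/proc/kcore");
		return 1;
	}
	/* Expect the ELF magic: 0x7f 'E' 'L' 'F'. */
	printf("magic: %02x %c%c%c\n", ehdr[0], ehdr[1], ehdr[2], ehdr[3]);
	fclose(f);
	return 0;
}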