x86: kexec_file: purge system-ram walking from prepare_elf64_headers()
While prepare_elf64_headers() in x86 looks pretty generic for other architectures' use, it contains some code which tries to list crash memory regions by walking through system resources, which is not always architecture agnostic. To make this function more generic, the related code should be purged.

With this patch, prepare_elf64_headers() simply scans the crash_mem buffer passed in and adds each listed region to the ELF header as a PT_LOAD segment. Accordingly, the walk_system_ram_res(prepare_elf64_ram_headers_callback) call has been moved forward, ahead of prepare_elf64_headers(), and the callback, prepare_elf64_ram_headers_callback(), is now responsible only for filling up the crash_mem buffer.

Meanwhile, elf_header_exclude_ranges() used to be called on every invocation of that callback, which was redundant; it is now called only once, from prepare_elf_headers().

Link: http://lkml.kernel.org/r/20180306102303.9063-4-takahiro.akashi@linaro.org
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Acked-by: Dave Young <dyoung@redhat.com>
Tested-by: Dave Young <dyoung@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 9ec4ecef0a
Commit: cbe6601617
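The shape of the change is a two-phase flow: the walk_system_ram_res() callback now only records ranges into the crash_mem buffer, and prepare_elf64_headers() later turns each recorded range into a PT_LOAD program header without walking anything itself. Below is a minimal, self-contained userspace sketch of that flow, for illustration only: walk_ram(), ram_headers_callback(), prepare_load_phdrs(), and the simplified structs are invented stand-ins, not kernel APIs.

#include <elf.h>
#include <stdio.h>

#define MAX_RANGES 16

/* Simplified mirror of the kernel's struct crash_mem. */
struct crash_mem_range {
        unsigned long long start, end;
};

struct crash_mem {
        unsigned int nr_ranges;
        struct crash_mem_range ranges[MAX_RANGES];
};

/*
 * Phase 1: like prepare_elf64_ram_headers_callback() after this patch,
 * the callback only records the region; no ELF work happens here.
 */
static int ram_headers_callback(unsigned long long start,
                                unsigned long long end, void *arg)
{
        struct crash_mem *cmem = arg;

        if (cmem->nr_ranges >= MAX_RANGES)
                return -1;
        cmem->ranges[cmem->nr_ranges].start = start;
        cmem->ranges[cmem->nr_ranges].end = end;
        cmem->nr_ranges++;
        return 0;
}

/* Invented stand-in for walk_system_ram_res(): visits two fake RAM regions. */
static int walk_ram(int (*fn)(unsigned long long, unsigned long long, void *),
                    void *arg)
{
        static const unsigned long long regions[][2] = {
                { 0x1000, 0x9ffff },
                { 0x100000, 0x7fffffff },
        };
        unsigned int i;
        int ret;

        for (i = 0; i < 2; i++) {
                ret = fn(regions[i][0], regions[i][1], arg);
                if (ret)
                        return ret;
        }
        return 0;
}

/*
 * Phase 2: like prepare_elf64_headers() after this patch, only scan the
 * crash_mem buffer; nothing here depends on how the ranges were found.
 */
static void prepare_load_phdrs(const struct crash_mem *cmem, Elf64_Phdr *phdr,
                               unsigned int *e_phnum)
{
        unsigned int i;

        for (i = 0; i < cmem->nr_ranges; i++, phdr++) {
                unsigned long long mstart = cmem->ranges[i].start;
                unsigned long long mend = cmem->ranges[i].end;

                phdr->p_type = PT_LOAD;
                phdr->p_flags = PF_R | PF_W | PF_X;
                phdr->p_offset = phdr->p_paddr = mstart;
                phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
                phdr->p_align = 0;
                (*e_phnum)++;
        }
}

int main(void)
{
        struct crash_mem cmem = { 0 };
        Elf64_Phdr phdrs[MAX_RANGES] = { { 0 } };
        unsigned int e_phnum = 0;
        unsigned int i;

        /* Walk first, then build headers: the order this patch establishes. */
        if (walk_ram(ram_headers_callback, &cmem))
                return 1;
        prepare_load_phdrs(&cmem, phdrs, &e_phnum);

        for (i = 0; i < e_phnum; i++)
                printf("PT_LOAD paddr=0x%llx sz=0x%llx\n",
                       (unsigned long long)phdrs[i].p_paddr,
                       (unsigned long long)phdrs[i].p_memsz);
        return 0;
}

Because phase 2 consumes only the crash_mem buffer, an architecture can fill that buffer by whatever means it likes, which is what makes prepare_elf64_headers() generic.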
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -317,18 +317,11 @@ static int exclude_mem_range(struct crash_mem *mem,
  * Look for any unwanted ranges between mstart, mend and remove them. This
  * might lead to split and split ranges are put in ced->mem.ranges[] array
  */
-static int elf_header_exclude_ranges(struct crash_elf_data *ced,
-		unsigned long long mstart, unsigned long long mend)
+static int elf_header_exclude_ranges(struct crash_elf_data *ced)
 {
 	struct crash_mem *cmem = &ced->mem;
 	int ret = 0;
 
-	memset(cmem->ranges, 0, sizeof(cmem->ranges));
-
-	cmem->ranges[0].start = mstart;
-	cmem->ranges[0].end = mend;
-	cmem->nr_ranges = 1;
-
 	/* Exclude crashkernel region */
 	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
 	if (ret)
@@ -346,53 +339,13 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
 static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
 {
 	struct crash_elf_data *ced = arg;
-	Elf64_Ehdr *ehdr;
-	Elf64_Phdr *phdr;
-	unsigned long mstart, mend;
-	struct kimage *image = ced->image;
-	struct crash_mem *cmem;
-	int ret, i;
+	struct crash_mem *cmem = &ced->mem;
+
+	cmem->ranges[cmem->nr_ranges].start = res->start;
+	cmem->ranges[cmem->nr_ranges].end = res->end;
+	cmem->nr_ranges++;
 
-	ehdr = ced->ehdr;
-
-	/* Exclude unwanted mem ranges */
-	ret = elf_header_exclude_ranges(ced, res->start, res->end);
-	if (ret)
-		return ret;
-
-	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
-	cmem = &ced->mem;
-
-	for (i = 0; i < cmem->nr_ranges; i++) {
-		mstart = cmem->ranges[i].start;
-		mend = cmem->ranges[i].end;
-
-		phdr = ced->bufp;
-		ced->bufp += sizeof(Elf64_Phdr);
-
-		phdr->p_type = PT_LOAD;
-		phdr->p_flags = PF_R|PF_W|PF_X;
-		phdr->p_offset = mstart;
-
-		/*
-		 * If a range matches backup region, adjust offset to backup
-		 * segment.
-		 */
-		if (mstart == image->arch.backup_src_start &&
-		    (mend - mstart + 1) == image->arch.backup_src_sz)
-			phdr->p_offset = image->arch.backup_load_addr;
-
-		phdr->p_paddr = mstart;
-		phdr->p_vaddr = (unsigned long long) __va(mstart);
-		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
-		phdr->p_align = 0;
-		ehdr->e_phnum++;
-		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
-			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
-			ehdr->e_phnum, phdr->p_offset);
-	}
-
-	return ret;
+	return 0;
 }
 
 static int prepare_elf64_headers(struct crash_elf_data *ced,
@@ -402,9 +355,10 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
 	Elf64_Phdr *phdr;
 	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
 	unsigned char *buf, *bufp;
-	unsigned int cpu;
+	unsigned int cpu, i;
 	unsigned long long notes_addr;
-	int ret;
+	struct crash_mem *cmem = &ced->mem;
+	unsigned long mstart, mend;
 
 	/* extra phdr for vmcoreinfo elf note */
 	nr_phdr = nr_cpus + 1;
@@ -473,13 +427,25 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
 	(ehdr->e_phnum)++;
 #endif
 
-	/* Prepare PT_LOAD headers for system ram chunks. */
-	ced->ehdr = ehdr;
-	ced->bufp = bufp;
-	ret = walk_system_ram_res(0, -1, ced,
-			prepare_elf64_ram_headers_callback);
-	if (ret < 0)
-		return ret;
+	/* Go through all the ranges in cmem->ranges[] and prepare phdr */
+	for (i = 0; i < cmem->nr_ranges; i++) {
+		mstart = cmem->ranges[i].start;
+		mend = cmem->ranges[i].end;
+
+		phdr->p_type = PT_LOAD;
+		phdr->p_flags = PF_R|PF_W|PF_X;
+		phdr->p_offset = mstart;
+
+		phdr->p_paddr = mstart;
+		phdr->p_vaddr = (unsigned long long) __va(mstart);
+		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
+		phdr->p_align = 0;
+		ehdr->e_phnum++;
+		phdr++;
+		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
+			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
+			ehdr->e_phnum, phdr->p_offset);
+	}
 
 	*addr = buf;
 	*sz = elf_sz;
@@ -491,7 +457,9 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
 				unsigned long *sz)
 {
 	struct crash_elf_data *ced;
-	int ret;
+	Elf64_Ehdr *ehdr;
+	Elf64_Phdr *phdr;
+	int ret, i;
 
 	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
 	if (!ced)
@@ -499,8 +467,35 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
 
 	fill_up_crash_elf_data(ced, image);
 
+	ret = walk_system_ram_res(0, -1, ced,
+				  prepare_elf64_ram_headers_callback);
+	if (ret)
+		goto out;
+
+	/* Exclude unwanted mem ranges */
+	ret = elf_header_exclude_ranges(ced);
+	if (ret)
+		goto out;
+
 	/* By default prepare 64bit headers */
 	ret = prepare_elf64_headers(ced, addr, sz);
+	if (ret)
+		goto out;
+
+	/*
+	 * If a range matches backup region, adjust offset to backup
+	 * segment.
+	 */
+	ehdr = (Elf64_Ehdr *)*addr;
+	phdr = (Elf64_Phdr *)(ehdr + 1);
+	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
+		if (phdr->p_type == PT_LOAD &&
+		    phdr->p_paddr == image->arch.backup_src_start &&
+		    phdr->p_memsz == image->arch.backup_src_sz) {
+			phdr->p_offset = image->arch.backup_load_addr;
+			break;
+		}
+out:
 	kfree(ced);
 	return ret;
 }