Merge branch 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Peter Anvin:
 "A pile of fixes related to the VDSO, EFI and 32-bit badsys handling.

  It turns out that removing the section headers from the VDSO breaks
  gdb, so this puts back most of them.  A very simple typo broke
  rt_sigreturn on some versions of glibc, with obviously disastrous
  results.  The rest is pretty much fixes for the corresponding fallout.

  The EFI fixes address an arithmetic overflow on 32-bit systems and
  quiet some build warnings.

  Finally, when invoking an invalid system call number on x86-32, we
  bypass a bunch of handling, which can make the audit code oops"
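
For context on the badsys item: a userspace program that passes an out-of-range
system call number should simply get -ENOSYS back, and the syscall exit work
(audit, tracing) must still run. A minimal, hypothetical test program (not part
of this merge) that exercises that path when built with -m32:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* 100000 is just an arbitrary number well past NR_syscalls. */
	long ret = syscall(100000);

	printf("ret=%ld errno=%d (%s)\n", ret, errno,
	       errno == ENOSYS ? "ENOSYS, as expected" : "unexpected");
	return 0;
}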

* 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  efi-pstore: Fix an overflow on 32-bit builds
  x86/vdso: Error out in vdso2c if DT_RELA is present
  x86/vdso: Move DISABLE_BRANCH_PROFILING into the vdso makefile
  x86_32, signal: Fix vdso rt_sigreturn
  x86_32, entry: Do syscall exit work on badsys (CVE-2014-4508)
  x86/vdso: Create .build-id links for unstripped vdso files
  x86/vdso: Remove some redundant in-memory section headers
  x86/vdso: Improve the fake section headers
  x86/vdso2c: Use better macros for ELF bitness
  x86/vdso: Discard the __bug_table section
  efi: Fix compiler warnings (unused, const, type)
Linus Torvalds, 2014-06-27 18:43:03 -07:00
Parents: c9a606660e ba3f35c7ce
Commit: d1fc98ba96
14 changed files, 302 additions and 129 deletions


@@ -423,9 +423,10 @@ sysenter_past_esp:
jnz sysenter_audit
sysenter_do_call:
cmpl $(NR_syscalls), %eax
jae syscall_badsys
jae sysenter_badsys
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp)
sysenter_after_call:
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
@@ -675,7 +676,12 @@ END(syscall_fault)
syscall_badsys:
movl $-ENOSYS,PT_EAX(%esp)
jmp resume_userspace
jmp syscall_exit
END(syscall_badsys)
sysenter_badsys:
movl $-ENOSYS,PT_EAX(%esp)
jmp sysenter_after_call
END(syscall_badsys)
CFI_ENDPROC


@@ -363,7 +363,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
/* Set up to return from userspace. */
restorer = current->mm->context.vdso +
selected_vdso32->sym___kernel_sigreturn;
selected_vdso32->sym___kernel_rt_sigreturn;
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
put_user_ex(restorer, &frame->pretcode);


@@ -11,7 +11,6 @@ VDSO32-$(CONFIG_COMPAT) := y
# files to link into the vdso
vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o
vobjs-nox32 := vdso-fakesections.o
# files to link into kernel
obj-y += vma.o
@@ -67,7 +66,8 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso2c FORCE
#
CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
$(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-fno-omit-frame-pointer -foptimize-sibling-calls
-fno-omit-frame-pointer -foptimize-sibling-calls \
-DDISABLE_BRANCH_PROFILING
$(vobjs): KBUILD_CFLAGS += $(CFL)
@@ -134,7 +134,7 @@ override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
targets += vdso32/vdso32.lds
targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
targets += vdso32/vclock_gettime.o
targets += vdso32/vclock_gettime.o vdso32/vdso-fakesections.o
$(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
@@ -150,11 +150,13 @@ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
$(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
$(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
$(obj)/vdso32/vdso32.lds \
$(obj)/vdso32/vclock_gettime.o \
$(obj)/vdso32/vdso-fakesections.o \
$(obj)/vdso32/note.o \
$(obj)/vdso32/%.o
$(call if_changed,vdso)
@@ -169,14 +171,24 @@ quiet_cmd_vdso = VDSO $@
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-Wl,-Bsymbolic $(LTO_CFLAGS)
$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
GCOV_PROFILE := n
#
# Install the unstripped copies of vdso*.so.
# Install the unstripped copies of vdso*.so. If our toolchain supports
# build-id, install .build-id links as well.
#
quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
cmd_vdso_install = cp $< $(MODLIB)/vdso/$(@:install_%=%)
define cmd_vdso_install
cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
if readelf -n $< |grep -q 'Build ID'; then \
buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
first=`echo $$buildid | cut -b-2`; \
last=`echo $$buildid | cut -b3-`; \
mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
fi
endef
vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
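
The install rule above splits the build-id into a two-character directory and a
remainder, the layout debuggers look for under a debug root. A standalone C
sketch of that path construction (the build-id value here is made up, standing
in for what "readelf -n vdso64.so.dbg" would print):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical build-id for illustration only. */
	const char *buildid = "8f1b3c2a9d4e5f60718293a4b5c6d7e8f9012345";
	char first[3];

	memcpy(first, buildid, 2);	/* first two hex characters */
	first[2] = '\0';

	/* The rest becomes "<rest>.debug", symlinked back to the vdso. */
	printf(".build-id/%s/%s.debug -> ../../vdso64.so.dbg\n",
	       first, buildid + 2);
	return 0;
}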


@@ -11,9 +11,6 @@
* Check with readelf after changing.
*/
/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING
#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/hpet.h>


@@ -2,31 +2,20 @@
* Copyright 2014 Andy Lutomirski
* Subject to the GNU Public License, v.2
*
* Hack to keep broken Go programs working.
*
* The Go runtime had a couple of bugs: it would read the section table to try
* to figure out how many dynamic symbols there were (it shouldn't have looked
* at the section table at all) and, if there were no SHT_SYNDYM section table
* entry, it would use an uninitialized value for the number of symbols. As a
* workaround, we supply a minimal section table. vdso2c will adjust the
* in-memory image so that "vdso_fake_sections" becomes the section table.
*
* The bug was introduced by:
* https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31)
* and is being addressed in the Go runtime in this issue:
* https://code.google.com/p/go/issues/detail?id=8197
* String table for loadable section headers. See vdso2c.h for why
* this exists.
*/
#ifndef __x86_64__
#error This hack is specific to the 64-bit vDSO
#endif
#include <linux/elf.h>
extern const __visible struct elf64_shdr vdso_fake_sections[];
const __visible struct elf64_shdr vdso_fake_sections[] = {
{
.sh_type = SHT_DYNSYM,
.sh_entsize = sizeof(Elf64_Sym),
}
};
const char fake_shstrtab[] __attribute__((section(".fake_shstrtab"))) =
".hash\0"
".dynsym\0"
".dynstr\0"
".gnu.version\0"
".gnu.version_d\0"
".dynamic\0"
".rodata\0"
".fake_shstrtab\0" /* Yay, self-referential code. */
".note\0"
".eh_frame_hdr\0"
".eh_frame\0"
".text";


@@ -6,6 +6,16 @@
* This script controls its layout.
*/
#if defined(BUILD_VDSO64)
# define SHDR_SIZE 64
#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
# define SHDR_SIZE 40
#else
# error unknown VDSO target
#endif
#define NUM_FAKE_SHDRS 13
SECTIONS
{
. = SIZEOF_HEADERS;
@@ -18,35 +28,52 @@ SECTIONS
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.dynamic : { *(.dynamic) } :text :dynamic
.rodata : {
*(.rodata*)
*(.data*)
*(.sdata*)
*(.got.plt) *(.got)
*(.gnu.linkonce.d.*)
*(.bss*)
*(.dynbss*)
*(.gnu.linkonce.b.*)
/*
* Ideally this would live in a C file, but that won't
* work cleanly for x32 until we start building the x32
* C code using an x32 toolchain.
*/
VDSO_FAKE_SECTION_TABLE_START = .;
. = . + NUM_FAKE_SHDRS * SHDR_SIZE;
VDSO_FAKE_SECTION_TABLE_END = .;
} :text
.fake_shstrtab : { *(.fake_shstrtab) } :text
.note : { *(.note.*) } :text :note
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.dynamic : { *(.dynamic) } :text :dynamic
.rodata : { *(.rodata*) } :text
.data : {
*(.data*)
*(.sdata*)
*(.got.plt) *(.got)
*(.gnu.linkonce.d.*)
*(.bss*)
*(.dynbss*)
*(.gnu.linkonce.b.*)
}
.altinstructions : { *(.altinstructions) }
.altinstr_replacement : { *(.altinstr_replacement) }
/*
* Align the actual code well away from the non-instruction data.
* This is the best thing for the I-cache.
* Text is well-separated from actual data: there's plenty of
* stuff that isn't used at runtime in between.
*/
. = ALIGN(0x100);
.text : { *(.text*) } :text =0x90909090,
/*
* At the end so that eu-elflint stays happy when vdso2c strips
* these. A better implementation would avoid allocating space
* for these.
*/
.altinstructions : { *(.altinstructions) } :text
.altinstr_replacement : { *(.altinstr_replacement) } :text
/*
* The remainder of the vDSO consists of special pages that are
* shared between the kernel and userspace. It needs to be at the
@@ -75,6 +102,7 @@ SECTIONS
/DISCARD/ : {
*(.discard)
*(.discard.*)
*(__bug_table)
}
}


@@ -6,6 +6,8 @@
* the DSO.
*/
#define BUILD_VDSO64
#include "vdso-layout.lds.S"
/*


@@ -23,6 +23,8 @@ enum {
sym_vvar_page,
sym_hpet_page,
sym_end_mapping,
sym_VDSO_FAKE_SECTION_TABLE_START,
sym_VDSO_FAKE_SECTION_TABLE_END,
};
const int special_pages[] = {
@@ -30,15 +32,26 @@ const int special_pages[] = {
sym_hpet_page,
};
char const * const required_syms[] = {
[sym_vvar_page] = "vvar_page",
[sym_hpet_page] = "hpet_page",
[sym_end_mapping] = "end_mapping",
"VDSO32_NOTE_MASK",
"VDSO32_SYSENTER_RETURN",
"__kernel_vsyscall",
"__kernel_sigreturn",
"__kernel_rt_sigreturn",
struct vdso_sym {
const char *name;
bool export;
};
struct vdso_sym required_syms[] = {
[sym_vvar_page] = {"vvar_page", true},
[sym_hpet_page] = {"hpet_page", true},
[sym_end_mapping] = {"end_mapping", true},
[sym_VDSO_FAKE_SECTION_TABLE_START] = {
"VDSO_FAKE_SECTION_TABLE_START", false
},
[sym_VDSO_FAKE_SECTION_TABLE_END] = {
"VDSO_FAKE_SECTION_TABLE_END", false
},
{"VDSO32_NOTE_MASK", true},
{"VDSO32_SYSENTER_RETURN", true},
{"__kernel_vsyscall", true},
{"__kernel_sigreturn", true},
{"__kernel_rt_sigreturn", true},
};
__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
@@ -83,37 +96,21 @@ extern void bad_put_le(void);
#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
#define BITS 64
#define GOFUNC go64
#define Elf_Ehdr Elf64_Ehdr
#define Elf_Shdr Elf64_Shdr
#define Elf_Phdr Elf64_Phdr
#define Elf_Sym Elf64_Sym
#define Elf_Dyn Elf64_Dyn
#include "vdso2c.h"
#undef BITS
#undef GOFUNC
#undef Elf_Ehdr
#undef Elf_Shdr
#undef Elf_Phdr
#undef Elf_Sym
#undef Elf_Dyn
#define BITSFUNC3(name, bits) name##bits
#define BITSFUNC2(name, bits) BITSFUNC3(name, bits)
#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS)
#define BITS 32
#define GOFUNC go32
#define Elf_Ehdr Elf32_Ehdr
#define Elf_Shdr Elf32_Shdr
#define Elf_Phdr Elf32_Phdr
#define Elf_Sym Elf32_Sym
#define Elf_Dyn Elf32_Dyn
#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
#define ELF_BITS 64
#include "vdso2c.h"
#undef BITS
#undef GOFUNC
#undef Elf_Ehdr
#undef Elf_Shdr
#undef Elf_Phdr
#undef Elf_Sym
#undef Elf_Dyn
#undef ELF_BITS
#define ELF_BITS 32
#include "vdso2c.h"
#undef ELF_BITS
static void go(void *addr, size_t len, FILE *outfile, const char *name)
{
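
The BITSFUNC()/ELF() scheme above depends on two-level macro expansion so that
ELF_BITS is substituted before the ## operator pastes tokens. A standalone
sketch of the expansion, using the same macro shapes as the diff; the STR
helpers exist only for this demo:

#include <stdio.h>

#define BITSFUNC3(name, bits) name##bits
#define BITSFUNC2(name, bits) BITSFUNC3(name, bits)
#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS)

#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)

/* Demo-only helpers to print what the macros expand to. */
#define STR2(x) #x
#define STR(x) STR2(x)

#define ELF_BITS 64

int main(void)
{
	/* Prints "go64 Elf64_Shdr": the extra indirection lets ELF_BITS
	 * expand to 64 before ## pastes the tokens together. */
	printf("%s %s\n", STR(BITSFUNC(go)), STR(ELF(Shdr)));
	return 0;
}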


@@ -4,23 +4,136 @@
* are built for 32-bit userspace.
*/
static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
/*
* We're writing a section table for a few reasons:
*
* The Go runtime had a couple of bugs: it would read the section
* table to try to figure out how many dynamic symbols there were (it
* shouldn't have looked at the section table at all) and, if there
* were no SHT_SYNDYM section table entry, it would use an
* uninitialized value for the number of symbols. An empty DYNSYM
* table would work, but I see no reason not to write a valid one (and
* keep full performance for old Go programs). This hack is only
* needed on x86_64.
*
* The bug was introduced on 2012-08-31 by:
* https://code.google.com/p/go/source/detail?r=56ea40aac72b
* and was fixed on 2014-06-13 by:
* https://code.google.com/p/go/source/detail?r=fc1cd5e12595
*
* Binutils has issues debugging the vDSO: it reads the section table to
* find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
* would break build-id if we removed the section table. Binutils
* also requires that shstrndx != 0. See:
* https://sourceware.org/bugzilla/show_bug.cgi?id=17064
*
* elfutils might not look for PT_NOTE if there is a section table at
* all. I don't know whether this matters for any practical purpose.
*
* For simplicity, rather than hacking up a partial section table, we
* just write a mostly complete one. We omit non-dynamic symbols,
* though, since they're rather large.
*
* Once binutils gets fixed, we might be able to drop this for all but
* the 64-bit vdso, since build-id only works in kernel RPMs, and
* systems that update to new enough kernel RPMs will likely update
* binutils in sync. build-id has never worked for home-built kernel
* RPMs without manual symlinking, and I suspect that no one ever does
* that.
*/
struct BITSFUNC(fake_sections)
{
ELF(Shdr) *table;
unsigned long table_offset;
int count, max_count;
int in_shstrndx;
unsigned long shstr_offset;
const char *shstrtab;
size_t shstrtab_len;
int out_shstrndx;
};
static unsigned int BITSFUNC(find_shname)(struct BITSFUNC(fake_sections) *out,
const char *name)
{
const char *outname = out->shstrtab;
while (outname - out->shstrtab < out->shstrtab_len) {
if (!strcmp(name, outname))
return (outname - out->shstrtab) + out->shstr_offset;
outname += strlen(outname) + 1;
}
if (*name)
printf("Warning: could not find output name \"%s\"\n", name);
return out->shstr_offset + out->shstrtab_len - 1; /* Use a null. */
}
static void BITSFUNC(init_sections)(struct BITSFUNC(fake_sections) *out)
{
if (!out->in_shstrndx)
fail("didn't find the fake shstrndx\n");
memset(out->table, 0, out->max_count * sizeof(ELF(Shdr)));
if (out->max_count < 1)
fail("we need at least two fake output sections\n");
PUT_LE(&out->table[0].sh_type, SHT_NULL);
PUT_LE(&out->table[0].sh_name, BITSFUNC(find_shname)(out, ""));
out->count = 1;
}
static void BITSFUNC(copy_section)(struct BITSFUNC(fake_sections) *out,
int in_idx, const ELF(Shdr) *in,
const char *name)
{
uint64_t flags = GET_LE(&in->sh_flags);
bool copy = flags & SHF_ALLOC &&
strcmp(name, ".altinstructions") &&
strcmp(name, ".altinstr_replacement");
if (!copy)
return;
if (out->count >= out->max_count)
fail("too many copied sections (max = %d)\n", out->max_count);
if (in_idx == out->in_shstrndx)
out->out_shstrndx = out->count;
out->table[out->count] = *in;
PUT_LE(&out->table[out->count].sh_name,
BITSFUNC(find_shname)(out, name));
/* elfutils requires that a strtab have the correct type. */
if (!strcmp(name, ".fake_shstrtab"))
PUT_LE(&out->table[out->count].sh_type, SHT_STRTAB);
out->count++;
}
static void BITSFUNC(go)(void *addr, size_t len,
FILE *outfile, const char *name)
{
int found_load = 0;
unsigned long load_size = -1; /* Work around bogus warning */
unsigned long data_size;
Elf_Ehdr *hdr = (Elf_Ehdr *)addr;
ELF(Ehdr) *hdr = (ELF(Ehdr) *)addr;
int i;
unsigned long j;
Elf_Shdr *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
*alt_sec = NULL;
Elf_Dyn *dyn = 0, *dyn_end = 0;
ELF(Dyn) *dyn = 0, *dyn_end = 0;
const char *secstrings;
uint64_t syms[NSYMS] = {};
uint64_t fake_sections_value = 0, fake_sections_size = 0;
struct BITSFUNC(fake_sections) fake_sections = {};
Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff));
ELF(Phdr) *pt = (ELF(Phdr) *)(addr + GET_LE(&hdr->e_phoff));
/* Walk the segment table. */
for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
@@ -51,7 +164,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
for (i = 0; dyn + i < dyn_end &&
GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
if (tag == DT_REL || tag == DT_RELSZ ||
if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
tag == DT_RELENT || tag == DT_TEXTREL)
fail("vdso image contains dynamic relocations\n");
}
@@ -61,7 +174,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
secstrings = addr + GET_LE(&secstrings_hdr->sh_offset);
for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
Elf_Shdr *sh = addr + GET_LE(&hdr->e_shoff) +
ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
GET_LE(&hdr->e_shentsize) * i;
if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
symtab_hdr = sh;
@@ -82,29 +195,63 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
i++) {
int k;
Elf_Sym *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
ELF(Sym) *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
GET_LE(&symtab_hdr->sh_entsize) * i;
const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
GET_LE(&sym->st_name);
for (k = 0; k < NSYMS; k++) {
if (!strcmp(name, required_syms[k])) {
if (!strcmp(name, required_syms[k].name)) {
if (syms[k]) {
fail("duplicate symbol %s\n",
required_syms[k]);
required_syms[k].name);
}
syms[k] = GET_LE(&sym->st_value);
}
}
if (!strcmp(name, "vdso_fake_sections")) {
if (fake_sections_value)
fail("duplicate vdso_fake_sections\n");
fake_sections_value = GET_LE(&sym->st_value);
fake_sections_size = GET_LE(&sym->st_size);
if (!strcmp(name, "fake_shstrtab")) {
ELF(Shdr) *sh;
fake_sections.in_shstrndx = GET_LE(&sym->st_shndx);
fake_sections.shstrtab = addr + GET_LE(&sym->st_value);
fake_sections.shstrtab_len = GET_LE(&sym->st_size);
sh = addr + GET_LE(&hdr->e_shoff) +
GET_LE(&hdr->e_shentsize) *
fake_sections.in_shstrndx;
fake_sections.shstr_offset = GET_LE(&sym->st_value) -
GET_LE(&sh->sh_addr);
}
}
/* Build the output section table. */
if (!syms[sym_VDSO_FAKE_SECTION_TABLE_START] ||
!syms[sym_VDSO_FAKE_SECTION_TABLE_END])
fail("couldn't find fake section table\n");
if ((syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
syms[sym_VDSO_FAKE_SECTION_TABLE_START]) % sizeof(ELF(Shdr)))
fail("fake section table size isn't a multiple of sizeof(Shdr)\n");
fake_sections.table = addr + syms[sym_VDSO_FAKE_SECTION_TABLE_START];
fake_sections.table_offset = syms[sym_VDSO_FAKE_SECTION_TABLE_START];
fake_sections.max_count = (syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
syms[sym_VDSO_FAKE_SECTION_TABLE_START]) /
sizeof(ELF(Shdr));
BITSFUNC(init_sections)(&fake_sections);
for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
GET_LE(&hdr->e_shentsize) * i;
BITSFUNC(copy_section)(&fake_sections, i, sh,
secstrings + GET_LE(&sh->sh_name));
}
if (!fake_sections.out_shstrndx)
fail("didn't generate shstrndx?!?\n");
PUT_LE(&hdr->e_shoff, fake_sections.table_offset);
PUT_LE(&hdr->e_shentsize, sizeof(ELF(Shdr)));
PUT_LE(&hdr->e_shnum, fake_sections.count);
PUT_LE(&hdr->e_shstrndx, fake_sections.out_shstrndx);
/* Validate mapping addresses. */
for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
if (!syms[i])
@@ -112,25 +259,17 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
if (syms[i] % 4096)
fail("%s must be a multiple of 4096\n",
required_syms[i]);
required_syms[i].name);
if (syms[i] < data_size)
fail("%s must be after the text mapping\n",
required_syms[i]);
required_syms[i].name);
if (syms[sym_end_mapping] < syms[i] + 4096)
fail("%s overruns end_mapping\n", required_syms[i]);
fail("%s overruns end_mapping\n",
required_syms[i].name);
}
if (syms[sym_end_mapping] % 4096)
fail("end_mapping must be a multiple of 4096\n");
/* Remove sections or use fakes */
if (fake_sections_size % sizeof(Elf_Shdr))
fail("vdso_fake_sections size is not a multiple of %ld\n",
(long)sizeof(Elf_Shdr));
PUT_LE(&hdr->e_shoff, fake_sections_value);
PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);
if (!name) {
fwrite(addr, load_size, 1, outfile);
return;
@@ -168,9 +307,9 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
(unsigned long)GET_LE(&alt_sec->sh_size));
}
for (i = 0; i < NSYMS; i++) {
if (syms[i])
if (required_syms[i].export && syms[i])
fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n",
required_syms[i], syms[i]);
required_syms[i].name, syms[i]);
}
fprintf(outfile, "};\n");
}


@@ -0,0 +1 @@
#include "../vdso-fakesections.c"


@@ -6,6 +6,8 @@
* the DSO.
*/
#define BUILD_VDSOX32
#include "vdso-layout.lds.S"
/*


@@ -40,7 +40,7 @@ struct pstore_read_data {
static inline u64 generic_id(unsigned long timestamp,
unsigned int part, int count)
{
return (timestamp * 100 + part) * 1000 + count;
return ((u64) timestamp * 100 + part) * 1000 + count;
}
static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
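
The one-character cast above matters because unsigned long is only 32 bits wide
on a 32-bit build, so the multiplication wraps before the result is ever
widened to u64. A standalone illustration (u64 spelled as unsigned long long
for a userspace demo; compile with -m32 to see the two results diverge):

#include <stdio.h>

int main(void)
{
	unsigned long timestamp = 1403913783UL;	/* seconds, ~2014-06-27 */
	unsigned int part = 1;
	int count = 2;

	/* Old expression: evaluated in unsigned long, wraps on 32-bit. */
	unsigned long long broken =
		(timestamp * 100 + part) * 1000 + count;

	/* Fixed expression: widen first, then multiply. */
	unsigned long long fixed =
		((unsigned long long)timestamp * 100 + part) * 1000 + count;

	printf("broken=%llu fixed=%llu\n", broken, fixed);
	return 0;
}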


@@ -353,10 +353,10 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
int depth, void *data)
{
struct param_info *info = data;
void *prop, *dest;
unsigned long len;
const void *prop;
void *dest;
u64 val;
int i;
int i, len;
if (depth != 1 ||
(strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))


@@ -63,7 +63,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
*/
prev = 0;
for (;;) {
const char *type, *name;
const char *type;
int len;
node = fdt_next_node(fdt, prev, NULL);