Merge branches 'x86-efi-for-linus', 'x86-gart-for-linus', 'x86-irq-for-linus' and 'x86-mce-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, efi: Ensure that the entirity of a region is mapped
  x86, efi: Pass a minimal map to SetVirtualAddressMap()
  x86, efi: Merge contiguous memory regions of the same type and attribute
  x86, efi: Consolidate EFI nx control
  x86, efi: Remove virtual-mode SetVirtualAddressMap call

* 'x86-gart-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, gart: Don't enforce GART aperture lower-bound by alignment

* 'x86-irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Don't unmask disabled irqs when migrating them
  x86: Skip migrating IRQF_PER_CPU irqs in fixup_irqs()

* 'x86-mce-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, mce: Drop the default decoding notifier
  x86, MCE: Do not taint when handling correctable errors
commit ac2941f59a
arch/x86/include/asm/efi.h
@@ -90,6 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
+extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern void efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
arch/x86/include/asm/mce.h
@@ -142,8 +142,6 @@ static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
 static inline void enable_p5_mce(void) {}
 #endif
 
-extern void (*x86_mce_decode_callback)(struct mce *m);
-
 void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, mce_dev);
arch/x86/kernel/aperture_64.c
@@ -30,6 +30,22 @@
 #include <asm/amd_nb.h>
 #include <asm/x86_init.h>
 
+/*
+ * Using 512M as goal, in case kexec will load kernel_big
+ * that will do the on-position decompress, and could overlap with
+ * with the gart aperture that is used.
+ * Sequence:
+ * kernel_small
+ * ==> kexec (with kdump trigger path or gart still enabled)
+ * ==> kernel_small (gart area become e820_reserved)
+ * ==> kexec (with kdump trigger path or gart still enabled)
+ * ==> kerne_big (uncompressed size will be big than 64M or 128M)
+ * So don't use 512M below as gart iommu, leave the space for kernel
+ * code for safe.
+ */
+#define GART_MIN_ADDR	(512ULL << 20)
+#define GART_MAX_ADDR	(1ULL << 32)
+
 int gart_iommu_aperture;
 int gart_iommu_aperture_disabled __initdata;
 int gart_iommu_aperture_allowed __initdata;
@@ -70,21 +86,9 @@ static u32 __init allocate_aperture(void)
 	 * memory. Unfortunately we cannot move it up because that would
 	 * make the IOMMU useless.
 	 */
-	/*
-	 * using 512M as goal, in case kexec will load kernel_big
-	 * that will do the on position decompress, and could overlap with
-	 * that position with gart that is used.
-	 * sequende:
-	 * kernel_small
-	 * ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
-	 * ==> kernel_small(gart area become e820_reserved)
-	 * ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
-	 * ==> kerne_big (uncompressed size will be big than 64M or 128M)
-	 * so don't use 512M below as gart iommu, leave the space for kernel
-	 * code for safe
-	 */
-	addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20);
-	if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) {
+	addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
+				      aper_size, aper_size);
+	if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) {
 		printk(KERN_ERR
 		      "Cannot allocate aperture memory hole (%lx,%uK)\n",
 		      addr, aper_size>>10);
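The gart change above replaces an alignment trick (aligning to 512M purely to keep the aperture above 512M) with an explicit search window. As a minimal userspace sketch of the new placement policy — with a hypothetical find_in_range() standing in for memblock_find_in_range(), and 0 modeling MEMBLOCK_ERROR:

#include <stdint.h>
#include <stdio.h>

#define GART_MIN_ADDR	(512ULL << 20)	/* keep low 512M free for kexec'd kernels */
#define GART_MAX_ADDR	(1ULL << 32)	/* the GART aperture must sit below 4G */

/* hypothetical stand-in for memblock_find_in_range(): bottom-up search */
static uint64_t find_in_range(uint64_t start, uint64_t end,
			      uint64_t size, uint64_t align)
{
	uint64_t addr = (start + align - 1) & ~(align - 1);

	return (addr + size <= end) ? addr : 0;	/* 0 models MEMBLOCK_ERROR */
}

int main(void)
{
	uint64_t aper_size = 64ULL << 20;	/* e.g. a 64M aperture */

	/* lower bound now comes from 'start'; alignment is just aper_size */
	uint64_t addr = find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
				      aper_size, aper_size);

	if (addr == 0 || addr + aper_size > GART_MAX_ADDR)
		printf("cannot allocate aperture memory hole\n");
	else
		printf("aperture hole at %#llx\n", (unsigned long long)addr);

	return 0;
}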
arch/x86/kernel/cpu/mcheck/mce.c
@@ -105,20 +105,6 @@ static int cpu_missing;
 ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
 EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);
 
-static int default_decode_mce(struct notifier_block *nb, unsigned long val,
-			      void *data)
-{
-	pr_emerg(HW_ERR "No human readable MCE decoding support on this CPU type.\n");
-	pr_emerg(HW_ERR "Run the message through 'mcelog --ascii' to decode.\n");
-
-	return NOTIFY_STOP;
-}
-
-static struct notifier_block mce_dec_nb = {
-	.notifier_call = default_decode_mce,
-	.priority      = -1,
-};
-
 /* MCA banks polled by the period polling timer for corrected events */
 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
@@ -212,6 +198,8 @@ void mce_log(struct mce *mce)
 
 static void print_mce(struct mce *m)
 {
+	int ret = 0;
+
 	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);
 
@@ -239,7 +227,11 @@ static void print_mce(struct mce *m)
	 * Print out human-readable details about the MCE error,
	 * (if the CPU has an implementation for that)
	 */
-	atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
+	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
+	if (ret == NOTIFY_STOP)
+		return;
+
+	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
 
 #define PANIC_TIMEOUT 5 /* 5 seconds */
@@ -590,7 +582,6 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
		if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) {
			mce_log(&m);
			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m);
-			add_taint(TAINT_MACHINE_CHECK);
		}
 
		/*
@@ -1722,8 +1713,6 @@ __setup("mce", mcheck_enable);
 
 int __init mcheck_init(void)
 {
-	atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb);
-
	mcheck_intel_therm_init();
 
	return 0;
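With the default notifier dropped, the decoder chain is consulted only by print_mce(), and any registered decoder that handles the event returns NOTIFY_STOP to suppress the ratelimited 'mcelog --ascii' fallback hint. An illustrative sketch of a decoder module registering on the exported chain — module and function names here are hypothetical, not part of the merge:

/*
 * Illustrative decoder for x86_mce_decoder_chain (exported GPL above).
 * Returning NOTIFY_STOP makes print_mce() skip the generic hint.
 */
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/mce.h>

static int my_decode_mce(struct notifier_block *nb, unsigned long val,
			 void *data)
{
	struct mce *m = data;

	if (!m)
		return NOTIFY_DONE;

	pr_emerg("decoded MCE: CPU %u bank %d status 0x%016llx\n",
		 m->extcpu, m->bank, m->status);

	return NOTIFY_STOP;	/* event decoded; suppress the fallback hint */
}

static struct notifier_block my_mce_dec = {
	.notifier_call = my_decode_mce,
};

static int __init my_dec_init(void)
{
	atomic_notifier_chain_register(&x86_mce_decoder_chain, &my_mce_dec);
	return 0;
}

static void __exit my_dec_exit(void)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &my_mce_dec);
}

module_init(my_dec_init);
module_exit(my_dec_exit);
MODULE_LICENSE("GPL");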
arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -187,8 +187,6 @@ static int therm_throt_process(bool new_event, int event, int level)
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package",
				state->count);
-
-		add_taint(TAINT_MACHINE_CHECK);
		return 1;
	}
	if (old_event) {
@@ -393,7 +391,6 @@ static void unexpected_thermal_interrupt(void)
 {
	printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
			smp_processor_id());
-	add_taint(TAINT_MACHINE_CHECK);
 }
 
 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
arch/x86/kernel/irq.c
@@ -249,7 +249,7 @@ void fixup_irqs(void)
 
		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
-		if (!irq_has_action(irq) ||
+		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
@@ -276,7 +276,8 @@ void fixup_irqs(void)
		else if (!(warned++))
			set_affinity = 0;
 
-		if (!irqd_can_move_in_process_context(data) && chip->irq_unmask)
+		if (!irqd_can_move_in_process_context(data) &&
+		    !irqd_irq_disabled(data) && chip->irq_unmask)
			chip->irq_unmask(data);
 
		raw_spin_unlock(&desc->lock);
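The two irq hunks tighten fixup_irqs() during CPU offline: strictly per-CPU irqs are never migrated, and an irq that was masked because it is disabled is no longer unmasked behind that disable. As a sketch, the eligibility test the first hunk implements, wrapped in a hypothetical helper (not a kernel function):

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/cpumask.h>

/* hypothetical helper mirroring the migration test in the hunk above */
static bool can_migrate_irq(unsigned int irq, struct irq_data *data,
			    const struct cpumask *affinity)
{
	/* no handler, strictly per-CPU, or already confined to online CPUs */
	if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
	    cpumask_subset(affinity, cpu_online_mask))
		return false;	/* leave this irq where it is */

	return true;
}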
arch/x86/platform/efi/efi.c
@@ -145,17 +145,6 @@ static void virt_efi_reset_system(int reset_type,
			  data_size, data);
 }
 
-static efi_status_t virt_efi_set_virtual_address_map(
-	unsigned long memory_map_size,
-	unsigned long descriptor_size,
-	u32 descriptor_version,
-	efi_memory_desc_t *virtual_map)
-{
-	return efi_call_virt4(set_virtual_address_map,
-			      memory_map_size, descriptor_size,
-			      descriptor_version, virtual_map);
-}
-
 static efi_status_t __init phys_efi_set_virtual_address_map(
	unsigned long memory_map_size,
	unsigned long descriptor_size,
@@ -468,11 +457,25 @@ void __init efi_init(void)
 #endif
 }
 
+void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
+{
+	u64 addr, npages;
+
+	addr = md->virt_addr;
+	npages = md->num_pages;
+
+	memrange_efi_to_native(&addr, &npages);
+
+	if (executable)
+		set_memory_x(addr, npages);
+	else
+		set_memory_nx(addr, npages);
+}
+
 static void __init runtime_code_page_mkexec(void)
 {
	efi_memory_desc_t *md;
	void *p;
-	u64 addr, npages;
 
	/* Make EFI runtime service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -481,10 +484,7 @@ static void __init runtime_code_page_mkexec(void)
		if (md->type != EFI_RUNTIME_SERVICES_CODE)
			continue;
 
-		addr = md->virt_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&addr, &npages);
-		set_memory_x(addr, npages);
+		efi_set_executable(md, true);
	}
 }
 
@@ -498,13 +498,42 @@ static void __init runtime_code_page_mkexec(void)
  */
 void __init efi_enter_virtual_mode(void)
 {
-	efi_memory_desc_t *md;
+	efi_memory_desc_t *md, *prev_md = NULL;
	efi_status_t status;
	unsigned long size;
	u64 end, systab, addr, npages, end_pfn;
-	void *p, *va;
+	void *p, *va, *new_memmap = NULL;
+	int count = 0;
 
	efi.systab = NULL;
 
+	/* Merge contiguous regions of the same type and attribute */
+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+		u64 prev_size;
+		md = p;
+
+		if (!prev_md) {
+			prev_md = md;
+			continue;
+		}
+
+		if (prev_md->type != md->type ||
+		    prev_md->attribute != md->attribute) {
+			prev_md = md;
+			continue;
+		}
+
+		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;
+
+		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
+			prev_md->num_pages += md->num_pages;
+			md->type = EFI_RESERVED_TYPE;
+			md->attribute = 0;
+			continue;
+		}
+		prev_md = md;
+	}
+
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (!(md->attribute & EFI_MEMORY_RUNTIME))
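The merge pass above coalesces adjacent descriptors of identical type and attribute and neutralizes the absorbed entry so later loops skip it. A self-contained userspace model of the same logic — the simplified desc type is hypothetical, standing in for efi_memory_desc_t:

#include <stdio.h>
#include <stdint.h>

#define EFI_PAGE_SHIFT	12
#define EFI_RESERVED	0

struct desc { uint32_t type; uint64_t attr, phys, pages; };

static void merge(struct desc *map, int n)
{
	struct desc *prev = NULL;

	for (int i = 0; i < n; i++) {
		struct desc *md = &map[i];

		if (prev && prev->type == md->type &&
		    prev->attr == md->attr &&
		    md->phys == prev->phys + (prev->pages << EFI_PAGE_SHIFT)) {
			prev->pages += md->pages;	/* grow prev */
			md->type = EFI_RESERVED;	/* neutralize md */
			md->attr = 0;
			continue;	/* prev may absorb further regions */
		}
		prev = md;
	}
}

int main(void)
{
	struct desc map[] = {
		{ 7, 1, 0x1000, 4 },	/* contiguous with the next entry */
		{ 7, 1, 0x5000, 2 },	/* absorbed: same type/attr, adjacent */
		{ 3, 1, 0x7000, 1 },	/* different type: left alone */
	};

	merge(map, 3);
	for (int i = 0; i < 3; i++)
		printf("type=%u pages=%llu\n", map[i].type,
		       (unsigned long long)map[i].pages);
	return 0;
}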
@@ -541,15 +570,21 @@ void __init efi_enter_virtual_mode(void)
			systab += md->virt_addr - md->phys_addr;
			efi.systab = (efi_system_table_t *) (unsigned long) systab;
		}
+		new_memmap = krealloc(new_memmap,
+				      (count + 1) * memmap.desc_size,
+				      GFP_KERNEL);
+		memcpy(new_memmap + (count * memmap.desc_size), md,
+		       memmap.desc_size);
+		count++;
	}
 
	BUG_ON(!efi.systab);
 
	status = phys_efi_set_virtual_address_map(
-		memmap.desc_size * memmap.nr_map,
+		memmap.desc_size * count,
		memmap.desc_size,
		memmap.desc_version,
-		memmap.phys_map);
+		(efi_memory_desc_t *)__pa(new_memmap));
 
	if (status != EFI_SUCCESS) {
		printk(KERN_ALERT "Unable to switch EFI into virtual mode "
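Only descriptors that reach this point of the EFI_MEMORY_RUNTIME loop are copied into new_memmap, so SetVirtualAddressMap() is handed desc_size * count bytes rather than the full map. A userspace model of the append step; note the kernel hunk above grows the buffer one descriptor at a time and, unlike this sketch, does not check for allocation failure:

#include <stdlib.h>
#include <string.h>

/* append one descriptor of desc_size bytes to a growable buffer */
static void *append_desc(void *new_memmap, int *count, const void *md,
			 size_t desc_size)
{
	void *grown = realloc(new_memmap, (*count + 1) * desc_size);

	if (!grown)
		return new_memmap;	/* keep what we have; caller may stop */

	memcpy((char *)grown + *count * desc_size, md, desc_size);
	(*count)++;
	return grown;
}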
@@ -572,11 +607,12 @@ void __init efi_enter_virtual_mode(void)
	efi.set_variable = virt_efi_set_variable;
	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
	efi.reset_system = virt_efi_reset_system;
-	efi.set_virtual_address_map = virt_efi_set_virtual_address_map;
+	efi.set_virtual_address_map = NULL;
	if (__supported_pte_mask & _PAGE_NX)
		runtime_code_page_mkexec();
	early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
	memmap.map = NULL;
+	kfree(new_memmap);
 }
 
 /*
arch/x86/platform/efi/efi_64.c
@@ -41,22 +41,7 @@
 static pgd_t save_pgd __initdata;
 static unsigned long efi_flags __initdata;
 
-static void __init early_mapping_set_exec(unsigned long start,
-					  unsigned long end,
-					  int executable)
-{
-	unsigned long num_pages;
-
-	start &= PMD_MASK;
-	end = (end + PMD_SIZE - 1) & PMD_MASK;
-	num_pages = (end - start) >> PAGE_SHIFT;
-	if (executable)
-		set_memory_x((unsigned long)__va(start), num_pages);
-	else
-		set_memory_nx((unsigned long)__va(start), num_pages);
-}
-
-static void __init early_runtime_code_mapping_set_exec(int executable)
+static void __init early_code_mapping_set_exec(int executable)
 {
	efi_memory_desc_t *md;
	void *p;
@@ -67,11 +52,8 @@ static void __init early_runtime_code_mapping_set_exec(int executable)
	/* Make EFI runtime service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
-		if (md->type == EFI_RUNTIME_SERVICES_CODE) {
-			unsigned long end;
-			end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-			early_mapping_set_exec(md->phys_addr, end, executable);
-		}
+		if (md->type == EFI_RUNTIME_SERVICES_CODE)
+			efi_set_executable(md, executable);
	}
 }
 
@@ -79,7 +61,7 @@ void __init efi_call_phys_prelog(void)
 {
	unsigned long vaddress;
 
-	early_runtime_code_mapping_set_exec(1);
+	early_code_mapping_set_exec(1);
	local_irq_save(efi_flags);
	vaddress = (unsigned long)__va(0x0UL);
	save_pgd = *pgd_offset_k(0x0UL);
@@ -95,7 +77,7 @@ void __init efi_call_phys_epilog(void)
	set_pgd(pgd_offset_k(0x0UL), save_pgd);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
-	early_runtime_code_mapping_set_exec(0);
+	early_code_mapping_set_exec(0);
 }
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
@@ -107,8 +89,10 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
		return ioremap(phys_addr, size);
 
	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
-	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
-		return NULL;
+	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
+		unsigned long top = last_map_pfn << PAGE_SHIFT;
+		efi_ioremap(top, size - (top - phys_addr), type);
+	}
 
	return (void __iomem *)__va(phys_addr);
 }
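The last hunk fixes truncated mappings: init_memory_mapping() can stop short of the requested end, and the old code bailed out with NULL instead of mapping the remainder. A userspace model of the recursive completion, with a hypothetical map_range() that deliberately caps each step at 2M to force recursion:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* hypothetical stand-in: maps as much as it can, returns last pfn mapped */
static uint64_t map_range(uint64_t start, uint64_t end)
{
	uint64_t capped = (end > start + 0x200000) ? start + 0x200000 : end;

	printf("mapped [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)capped);
	return capped >> PAGE_SHIFT;
}

static void map_all(uint64_t phys, uint64_t size)
{
	uint64_t last_pfn = map_range(phys, phys + size);
	uint64_t top = last_pfn << PAGE_SHIFT;

	if (top < phys + size)			/* tail left unmapped? */
		map_all(top, size - (top - phys));	/* recurse on it */
}

int main(void)
{
	map_all(0x1000000, 0x500000);	/* 5M request: several recursion steps */
	return 0;
}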