Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, mce: Clean up thermal init by introducing intel_thermal_supported()
  x86, mce: Thermal monitoring depends on APIC being enabled
  x86: Gart: fix breakage due to IOMMU initialization cleanup
  x86: Move swiotlb initialization before dma32_free_bootmem
  x86: Fix build warning in arch/x86/mm/mmio-mod.c
  x86: Remove usedac in feature-removal-schedule.txt
  x86: Fix duplicated UV BAU interrupt vector
  nvram: Fix write beyond end condition; prove to gcc copy is safe
  mm: Adjust do_pages_stat() so gcc can see copy_from_user() is safe
  x86: Limit the number of processor bootup messages
  x86: Remove enabling x2apic message for every CPU
  doc: Add documentation for bootloader_{type,version}
  x86, msr: Add support for non-contiguous cpumasks
  x86: Use find_e820() instead of hard coded trampoline address
  x86, AMD: Fix stale cpuid4_info shared_map data in shared_cpu_map cpumasks

Trivial percpu-naming-introduced conflicts in arch/x86/kernel/cpu/intel_cacheinfo.c
Linus Torvalds 2009-12-14 12:36:46 -08:00
Parents: fb1beb29b5 70fe440718
Commit: 75b08038ce
27 changed files with 192 additions and 119 deletions


@@ -291,13 +291,6 @@ Who: Michael Buesch <mb@bu3sch.de>
---------------------------
What: usedac i386 kernel parameter
When: 2.6.27
Why: replaced by allowdac and no dac combination
Who: Glauber Costa <gcosta@redhat.com>
---------------------------
What: print_fn_descriptor_symbol()
When: October 2009
Why: The %pF vsprintf format provides the same functionality in a


@@ -19,6 +19,8 @@ Currently, these files might (depending on your configuration)
show up in /proc/sys/kernel:
- acpi_video_flags
- acct
- bootloader_type [ X86 only ]
- bootloader_version [ X86 only ]
- callhome [ S390 only ]
- auto_msgmni
- core_pattern
@@ -93,6 +95,35 @@ valid for 30 seconds.
==============================================================
bootloader_type:
x86 bootloader identification
This gives the bootloader type number as indicated by the bootloader,
shifted left by 4, and OR'd with the low four bits of the bootloader
version. The reason for this encoding is that this used to match the
type_of_loader field in the kernel header; the encoding is kept for
backwards compatibility. That is, if the full bootloader type number
is 0x15 and the full version number is 0x234, this file will contain
the value 340 = 0x154.
See the type_of_loader and ext_loader_type fields in
Documentation/x86/boot.txt for additional information.
==============================================================
bootloader_version:
x86 bootloader version
The complete bootloader version number. In the example above, this
file will contain the value 564 = 0x234.
See the type_of_loader and ext_loader_ver fields in
Documentation/x86/boot.txt for additional information.
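As a worked example of the encoding above (a hypothetical user-space sketch,
not part of this patch; the values are the ones used in the text):

	#include <stdio.h>

	int main(void)
	{
		unsigned int type = 0x15;	/* full bootloader type number */
		unsigned int version = 0x234;	/* full bootloader version number */

		/* type shifted left by 4, OR'd with the low four bits of the version */
		unsigned int bootloader_type = (type << 4) | (version & 0xf);

		printf("bootloader_type:    %u (0x%x)\n", bootloader_type, bootloader_type);
		printf("bootloader_version: %u (0x%x)\n", version, version);
		return 0;	/* prints 340 (0x154) and 564 (0x234) */
	}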
==============================================================
callhome:
Controls the kernel's callhome behavior in case of a kernel panic.


@@ -113,7 +113,7 @@
*/
#define LOCAL_PENDING_VECTOR 0xec
#define UV_BAU_MESSAGE 0xec
#define UV_BAU_MESSAGE 0xea
/*
* Self IPI vector for machine checks


@@ -244,6 +244,9 @@ do { \
#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);


@@ -16,7 +16,6 @@ extern unsigned long initial_code;
extern unsigned long initial_gs;
#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
#define TRAMPOLINE_BASE 0x6000
extern unsigned long setup_trampoline(void);
extern void __init reserve_trampoline_memory(void);


@@ -280,7 +280,8 @@ void __init early_gart_iommu_check(void)
* or BIOS forget to put that in reserved.
* try to update e820 to make that region as reserved.
*/
int i, fix, slot;
u32 agp_aper_base = 0, agp_aper_order = 0;
int i, fix, slot, valid_agp = 0;
u32 ctl;
u32 aper_size = 0, aper_order = 0, last_aper_order = 0;
u64 aper_base = 0, last_aper_base = 0;
@@ -290,6 +291,8 @@ void __init early_gart_iommu_check(void)
return;
/* This is mostly duplicate of iommu_hole_init */
agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp);
fix = 0;
for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
int bus;
@@ -342,10 +345,10 @@ void __init early_gart_iommu_check(void)
}
}
if (!fix)
if (valid_agp)
return;
/* different nodes have different setting, disable them all at first*/
/* disable them all at first */
for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
int bus;
int dev_base, dev_limit;
@@ -458,8 +461,6 @@ out:
if (aper_alloc) {
/* Got the aperture from the AGP bridge */
} else if (!valid_agp) {
/* Do nothing */
} else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
force_iommu ||
valid_agp ||


@@ -1341,7 +1341,7 @@ void enable_x2apic(void)
rdmsr(MSR_IA32_APICBASE, msr, msr2);
if (!(msr & X2APIC_ENABLE)) {
pr_info("Enabling x2apic\n");
printk_once(KERN_INFO "Enabling x2apic\n");
wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
}
}


@@ -74,6 +74,7 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
unsigned int eax, ebx, ecx, edx, sub_index;
unsigned int ht_mask_width, core_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
static bool printed;
if (c->cpuid_level < 0xb)
return;
@@ -127,12 +128,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
c->x86_max_cores = (core_level_siblings / smp_num_siblings);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
c->phys_proc_id);
if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
c->cpu_core_id);
if (!printed) {
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
c->phys_proc_id);
if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
c->cpu_core_id);
printed = 1;
}
return;
#endif
}


@@ -375,8 +375,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
node = nearby_node(apicid);
}
numa_set_node(cpu, node);
printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}


@@ -427,6 +427,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_HT
u32 eax, ebx, ecx, edx;
int index_msb, core_bits;
static bool printed;
if (!cpu_has(c, X86_FEATURE_HT))
return;
@@ -442,7 +443,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
smp_num_siblings = (ebx & 0xff0000) >> 16;
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
goto out;
}
@@ -469,11 +470,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
((1 << core_bits) - 1);
out:
if ((c->x86_max_cores * smp_num_siblings) > 1) {
if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
c->phys_proc_id);
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
c->cpu_core_id);
printed = 1;
}
#endif
}
@@ -1115,7 +1117,7 @@ void __cpuinit cpu_init(void)
if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
panic("CPU#%d already initialized!\n", cpu);
printk(KERN_INFO "Initializing CPU#%d\n", cpu);
pr_debug("Initializing CPU#%d\n", cpu);
clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);


@@ -270,8 +270,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
node = cpu_to_node(cpu);
}
numa_set_node(cpu, node);
printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}


@@ -507,18 +507,19 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
struct _cpuid4_info *this_leaf, *sibling_leaf;
unsigned long num_threads_sharing;
int index_msb, i;
int index_msb, i, sibling;
struct cpuinfo_x86 *c = &cpu_data(cpu);
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
struct cpuinfo_x86 *d;
for_each_online_cpu(i) {
for_each_cpu(i, c->llc_shared_map) {
if (!per_cpu(ici_cpuid4_info, i))
continue;
d = &cpu_data(i);
this_leaf = CPUID4_INFO_IDX(i, index);
cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
d->llc_shared_map);
for_each_cpu(sibling, c->llc_shared_map) {
if (!cpu_online(sibling))
continue;
set_bit(sibling, this_leaf->shared_cpu_map);
}
}
return;
}


@@ -256,6 +256,16 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
ack_APIC_irq();
}
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
if (!cpu_has_apic)
return 0;
if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
return 0;
return 1;
}
void __init mcheck_intel_therm_init(void)
{
/*
@@ -263,8 +273,7 @@ void __init mcheck_intel_therm_init(void)
* LVT value on BSP and use that value to restore APs' thermal LVT
* entry BIOS programmed later
*/
if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) &&
cpu_has(&boot_cpu_data, X86_FEATURE_ACC))
if (intel_thermal_supported(&boot_cpu_data))
lvtthmr_init = apic_read(APIC_LVTTHMR);
}
@@ -274,8 +283,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
int tm2 = 0;
u32 l, h;
/* Thermal monitoring depends on ACPI and clock modulation*/
if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
if (!intel_thermal_supported(c))
return;
/*
@@ -339,8 +347,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
cpu, tm2 ? "TM2" : "TM1");
printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
tm2 ? "TM2" : "TM1");
/* enable thermal throttle processing */
atomic_set(&therm_throt_en, 1);


@@ -732,7 +732,16 @@ struct early_res {
char overlap_ok;
};
static struct early_res early_res[MAX_EARLY_RES] __initdata = {
{ 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
{ 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
#ifdef CONFIG_X86_32
/*
* But first pinch a few for the stack/trampoline stuff
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
{ PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 },
#endif
{}
};


@@ -29,8 +29,6 @@ static void __init i386_default_early_setup(void)
void __init i386_start_kernel(void)
{
reserve_trampoline_memory();
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD


@@ -98,8 +98,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
{
copy_bootdata(__va(real_mode_data));
reserve_trampoline_memory();
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD


@@ -945,9 +945,6 @@ void __init early_reserve_e820_mpc_new(void)
{
if (enable_update_mptable && alloc_mptable) {
u64 startt = 0;
#ifdef CONFIG_X86_TRAMPOLINE
startt = TRAMPOLINE_BASE;
#endif
mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4);
}
}


@@ -120,11 +120,14 @@ static void __init dma32_free_bootmem(void)
void __init pci_iommu_alloc(void)
{
int use_swiotlb;
use_swiotlb = pci_swiotlb_init();
#ifdef CONFIG_X86_64
/* free the range so iommu could get some range less than 4G */
dma32_free_bootmem();
#endif
if (pci_swiotlb_init())
if (use_swiotlb)
return;
gart_iommu_hole_init();


@@ -710,7 +710,8 @@ static void gart_iommu_shutdown(void)
struct pci_dev *dev;
int i;
if (no_agp)
/* don't shutdown it if there is AGP installed */
if (!no_agp)
return;
for (i = 0; i < num_k8_northbridges; i++) {


@@ -73,6 +73,7 @@
#include <asm/mtrr.h>
#include <asm/apic.h>
#include <asm/trampoline.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
@@ -875,6 +876,13 @@ void __init setup_arch(char **cmdline_p)
reserve_brk();
/*
* Find and reserve possible boot-time SMP configuration:
*/
find_smp_config();
reserve_trampoline_memory();
#ifdef CONFIG_ACPI_SLEEP
/*
* Reserve low memory region for sleep support.
@@ -921,11 +929,6 @@ void __init setup_arch(char **cmdline_p)
early_acpi_boot_init();
/*
* Find and reserve possible boot-time SMP configuration:
*/
find_smp_config();
#ifdef CONFIG_ACPI_NUMA
/*
* Parse SRAT to discover nodes.


@@ -671,6 +671,26 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
complete(&c_idle->done);
}
/* reduce the number of lines printed when booting a large cpu count system */
static void __cpuinit announce_cpu(int cpu, int apicid)
{
static int current_node = -1;
int node = cpu_to_node(cpu);
if (system_state == SYSTEM_BOOTING) {
if (node != current_node) {
if (current_node > (-1))
pr_cont(" Ok.\n");
current_node = node;
pr_info("Booting Node %3d, Processors ", node);
}
pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
return;
} else
pr_info("Booting Node %d Processor %d APIC 0x%x\n",
node, cpu, apicid);
}
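/*
 * Illustration, not part of the patch: with announce_cpu() the boot log
 * shrinks to roughly one line per node. Sample output derived from the
 * pr_info/pr_cont format strings above (node and CPU numbers made up):
 *
 *	Booting Node   0, Processors  #1 #2 #3 Ok.
 *	Booting Node   1, Processors  #4 #5 #6 #7 Ok.
 */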
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -737,9 +757,8 @@ do_rest:
/* start_ip had better be page-aligned! */
start_ip = setup_trampoline();
/* So we see what's up */
printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
cpu, apicid, start_ip);
/* So we see what's up */
announce_cpu(cpu, apicid);
/*
* This grunge runs the startup process for
@@ -788,21 +807,17 @@ do_rest:
udelay(100);
}
if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
/* number CPUs logically, starting from 1 (BSP is 0) */
pr_debug("OK.\n");
printk(KERN_INFO "CPU%d: ", cpu);
print_cpu_info(&cpu_data(cpu));
pr_debug("CPU has booted.\n");
} else {
if (cpumask_test_cpu(cpu, cpu_callin_mask))
pr_debug("CPU%d: has booted.\n", cpu);
else {
boot_error = 1;
if (*((volatile unsigned char *)trampoline_base)
== 0xA5)
/* trampoline started but...? */
printk(KERN_ERR "Stuck ??\n");
pr_err("CPU%d: Stuck ??\n", cpu);
else
/* trampoline code not run */
printk(KERN_ERR "Not responding.\n");
pr_err("CPU%d: Not responding.\n", cpu);
if (apic->inquire_remote_apic)
apic->inquire_remote_apic(apicid);
}
@@ -1293,14 +1308,16 @@ void native_cpu_die(unsigned int cpu)
for (i = 0; i < 10; i++) {
/* They ack this in play_dead by setting CPU_DEAD */
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
printk(KERN_INFO "CPU %d is now offline\n", cpu);
if (system_state == SYSTEM_RUNNING)
pr_info("CPU %u is now offline\n", cpu);
if (1 == num_online_cpus())
alternatives_smp_switch(0);
return;
}
msleep(100);
}
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
pr_err("CPU %u didn't die...\n", cpu);
}
void play_dead_common(void)


@@ -12,21 +12,19 @@
#endif
/* ready for x86_64 and x86 */
unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
unsigned char *__trampinitdata trampoline_base;
void __init reserve_trampoline_memory(void)
{
#ifdef CONFIG_X86_32
/*
* But first pinch a few for the stack/trampoline stuff
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
#endif
unsigned long mem;
/* Has to be in very low memory so we can execute real-mode AP code. */
reserve_early(TRAMPOLINE_BASE, TRAMPOLINE_BASE + TRAMPOLINE_SIZE,
"TRAMPOLINE");
mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE);
if (mem == -1L)
panic("Cannot allocate trampoline\n");
trampoline_base = __va(mem);
reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
}
/*


@@ -7,7 +7,6 @@ struct msr_info {
u32 msr_no;
struct msr reg;
struct msr *msrs;
int off;
int err;
};
@@ -18,7 +17,7 @@ static void __rdmsr_on_cpu(void *info)
int this_cpu = raw_smp_processor_id();
if (rv->msrs)
reg = &rv->msrs[this_cpu - rv->off];
reg = per_cpu_ptr(rv->msrs, this_cpu);
else
reg = &rv->reg;
@@ -32,7 +31,7 @@ static void __wrmsr_on_cpu(void *info)
int this_cpu = raw_smp_processor_id();
if (rv->msrs)
reg = &rv->msrs[this_cpu - rv->off];
reg = per_cpu_ptr(rv->msrs, this_cpu);
else
reg = &rv->reg;
@@ -80,7 +79,6 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
memset(&rv, 0, sizeof(rv));
rv.off = cpumask_first(mask);
rv.msrs = msrs;
rv.msr_no = msr_no;
@@ -120,6 +118,26 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
}
EXPORT_SYMBOL(wrmsr_on_cpus);
struct msr *msrs_alloc(void)
{
struct msr *msrs = NULL;
msrs = alloc_percpu(struct msr);
if (!msrs) {
pr_warning("%s: error allocating msrs\n", __func__);
return NULL;
}
return msrs;
}
EXPORT_SYMBOL(msrs_alloc);
void msrs_free(struct msr *msrs)
{
free_percpu(msrs);
}
EXPORT_SYMBOL(msrs_free);
/* These "safe" variants are slower and should be used when the target MSR
may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
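
The switch to a percpu allocation is what buys non-contiguous cpumask
support: the old code indexed a flat array with (this_cpu - rv->off), where
off = cpumask_first(mask), which goes wrong as soon as the mask has holes;
per_cpu_ptr() indexes by CPU id directly. A minimal sketch of the new API,
modeled on the amd64_edac caller later in this merge (mask setup and error
handling elided):

	struct msr *msrs = msrs_alloc();	/* one struct msr per CPU */
	if (!msrs)
		return -ENOMEM;

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);	/* mask may have holes */

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		/* ... reg->l and reg->h now hold this CPU's MSR value ... */
	}

	msrs_free(msrs);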


@@ -20,7 +20,7 @@
* Derived from the read-mod example from relay-examples by Tom Zanussi.
*/
#define pr_fmt(fmt) "mmiotrace: "
#define pr_fmt(fmt) "mmiotrace: " fmt
#define DEBUG 1


@@ -264,10 +264,16 @@ static ssize_t nvram_write(struct file *file, const char __user *buf,
unsigned char contents[NVRAM_BYTES];
unsigned i = *ppos;
unsigned char *tmp;
int len;
len = (NVRAM_BYTES - i) < count ? (NVRAM_BYTES - i) : count;
if (copy_from_user(contents, buf, len))
if (i >= NVRAM_BYTES)
return 0; /* Past EOF */
if (count > NVRAM_BYTES - i)
count = NVRAM_BYTES - i;
if (count > NVRAM_BYTES)
return -EFAULT; /* Can't happen, but prove it to gcc */
if (copy_from_user(contents, buf, count))
return -EFAULT;
spin_lock_irq(&rtc_lock);
@@ -275,7 +281,7 @@ static ssize_t nvram_write(struct file *file, const char __user *buf,
if (!__nvram_check_checksum())
goto checksum_err;
for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
for (tmp = contents; count--; ++i, ++tmp)
__nvram_write_byte(*tmp, i);
__nvram_set_checksum();
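
The apparently redundant "count > NVRAM_BYTES" test is there for bounds
visibility: after the clamp, count <= NVRAM_BYTES - i always holds, so the
copy into the on-stack contents[NVRAM_BYTES] buffer cannot overrun; the
extra comparison exists only so gcc's static copy_from_user() size check
can see that too. The same clamp-before-copy pattern in isolation (a sketch
with hypothetical names, not the driver code):

	static size_t clamp_write_len(size_t pos, size_t count, size_t size)
	{
		if (pos >= size)
			return 0;		/* past EOF, nothing to copy */
		if (count > size - pos)
			count = size - pos;	/* clamp to the space left */
		return count;			/* count <= size is now provable */
	}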


@@ -13,6 +13,8 @@ module_param(report_gart_errors, int, 0644);
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
static struct msr *msrs;
/* Lookup table for all possible MC control instances */
struct amd64_pvt;
static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
@@ -2495,8 +2497,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
cpumask_var_t mask;
struct msr *msrs;
int cpu, nbe, idx = 0;
int cpu, nbe;
bool ret = false;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -2507,32 +2508,22 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
get_cpus_on_this_dct_cpumask(mask, nid);
msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
if (!msrs) {
amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
__func__);
free_cpumask_var(mask);
return false;
}
rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
for_each_cpu(cpu, mask) {
nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
struct msr *reg = per_cpu_ptr(msrs, cpu);
nbe = reg->l & K8_MSR_MCGCTL_NBE;
debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
cpu, msrs[idx].q,
cpu, reg->q,
(nbe ? "enabled" : "disabled"));
if (!nbe)
goto out;
idx++;
}
ret = true;
out:
kfree(msrs);
free_cpumask_var(mask);
return ret;
}
@@ -2540,8 +2531,7 @@ out:
static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
{
cpumask_var_t cmask;
struct msr *msrs = NULL;
int cpu, idx = 0;
int cpu;
if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
@@ -2551,34 +2541,27 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
if (!msrs) {
amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
__func__);
return -ENOMEM;
}
rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
for_each_cpu(cpu, cmask) {
struct msr *reg = per_cpu_ptr(msrs, cpu);
if (on) {
if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
if (reg->l & K8_MSR_MCGCTL_NBE)
pvt->flags.ecc_report = 1;
msrs[idx].l |= K8_MSR_MCGCTL_NBE;
reg->l |= K8_MSR_MCGCTL_NBE;
} else {
/*
* Turn off ECC reporting only when it was off before
*/
if (!pvt->flags.ecc_report)
msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
reg->l &= ~K8_MSR_MCGCTL_NBE;
}
idx++;
}
wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
kfree(msrs);
free_cpumask_var(cmask);
return 0;
@@ -3036,6 +3019,8 @@ static int __init amd64_edac_init(void)
if (cache_k8_northbridges() < 0)
return err;
msrs = msrs_alloc();
err = pci_register_driver(&amd64_pci_driver);
if (err)
return err;
@@ -3071,6 +3056,9 @@ static void __exit amd64_edac_exit(void)
edac_pci_release_generic_ctl(amd64_ctl_pci);
pci_unregister_driver(&amd64_pci_driver);
msrs_free(msrs);
msrs = NULL;
}
module_init(amd64_edac_init);


@@ -1044,7 +1044,7 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
int err;
for (i = 0; i < nr_pages; i += chunk_nr) {
if (chunk_nr + i > nr_pages)
if (chunk_nr > nr_pages - i)
chunk_nr = nr_pages - i;
err = copy_from_user(chunk_pages, &pages[i],