Merge branch 'perf/core' into oprofile/master

Merge reason: Resolve conflicts with Don's NMI rework:

    commit 9c48f1c629
    Author: Don Zickus <dzickus@redhat.com>
    Date:   Fri Sep 30 15:06:21 2011 -0400
    x86, nmi: Wire up NMI handlers to new routines

Conflicts:
	arch/x86/oprofile/nmi_timer_int.c

Signed-off-by: Robert Richter <robert.richter@amd.com>
Robert Richter committed 2011-11-08 15:52:15 +01:00
Parents: dcfce4a095 9c48f1c629
Commit: de346b6949
210 changed files with 2139 additions and 1496 deletions


@ -35,13 +35,6 @@ the Out-Of-Spec bit. Following table summarizes the exported sysfs files:
All Sysfs entries are named with their core_id (represented here by 'X').
tempX_input - Core temperature (in millidegrees Celsius).
tempX_max - All cooling devices should be turned on (on Core2).
Initialized with IA32_THERM_INTERRUPT. When the CPU
temperature reaches this temperature, an interrupt is
generated and tempX_max_alarm is set.
tempX_max_hyst - If the CPU temperature falls below this temperature,
an interrupt is generated and tempX_max_alarm is reset.
tempX_max_alarm - Set if the temperature reaches or exceeds tempX_max.
Reset if the temperature drops to or below tempX_max_hyst.
tempX_crit - Maximum junction temperature (in millidegrees Celsius).
tempX_crit_alarm - Set when Out-of-spec bit is set, never clears.
Correct CPU operation is no longer guaranteed.
@ -49,9 +42,10 @@ tempX_label - Contains string "Core X", where X is processor
number. For Package temp, this will be "Physical id Y",
where Y is the package number.
The TjMax temperature is set to 85 degrees C if the undocumented model-specific
register (UMSR) 0xee has bit 30 set. If not, TjMax is 100 degrees C, as
(sometimes) documented in the processor datasheet.
On CPU models which support it, TjMax is read from a model-specific register.
On other models, it is set to an arbitrary value based on weak heuristics.
If these heuristics don't work for you, you can pass the correct TjMax value
as a module parameter (tjmax).
Appendix A. Known TjMax lists (TBD):
Some information comes from ark.intel.com
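As a small aside, here is a minimal user-space sketch that consumes the tempX_input file described above. The sysfs path and the core index are assumptions (they vary by platform and kernel version); only the millidegrees-Celsius unit comes from the text above.

#include <stdio.h>

int main(void)
{
	/* Path is an assumption -- adjust to wherever tempX_input shows up. */
	const char *path = "/sys/devices/platform/coretemp.0/temp2_input";
	FILE *f = fopen(path, "r");
	long millideg;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%ld", &millideg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	/* tempX_input is documented above in millidegrees Celsius */
	printf("core temperature: %.1f degrees C\n", millideg / 1000.0);
	return 0;
}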


@ -1042,7 +1042,7 @@ conf/interface/*:
The functional behaviour for certain settings is different
depending on whether local forwarding is enabled or not.
accept_ra - BOOLEAN
accept_ra - INTEGER
Accept Router Advertisements; autoconfigure using them.
Possible values are:
@ -1106,7 +1106,7 @@ dad_transmits - INTEGER
The number of Duplicate Address Detection probes to send.
Default: 1
forwarding - BOOLEAN
forwarding - INTEGER
Configure interface-specific Host/Router behaviour.
Note: It is recommended to have the same setting on all


@ -243,7 +243,7 @@ configured. The number of entries in the global flow table is set through:
The number of entries in the per-queue flow table is set through:
/sys/class/net/<dev>/queues/tx-<n>/rps_flow_cnt
/sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt
== Suggested Configuration


@ -123,10 +123,11 @@ be automatically shutdown if it's set to "never".
khugepaged usually runs at low frequency, so while one may not want to
invoke defrag algorithms synchronously during page faults, it
should be worth invoking defrag at least in khugepaged. However it's
also possible to disable defrag in khugepaged:
also possible to disable defrag in khugepaged by writing 0 or enable
defrag in khugepaged by writing 1:
echo yes >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
echo no >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
You can also control how many pages khugepaged should scan at each
pass:


@ -6374,7 +6374,6 @@ S: Supported
F: arch/arm/mach-tegra
TEHUTI ETHERNET DRIVER
M: Alexander Indenbaum <baum@tehutinetworks.net>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
S: Supported


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc9
NAME = "Divemaster Edition"
# *DOCUMENTATION*


@ -1283,6 +1283,20 @@ config ARM_ERRATA_364296
processor into full low interrupt latency mode. ARM11MPCore
is not affected.
config ARM_ERRATA_764369
bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
depends on CPU_V7 && SMP
help
This option enables the workaround for erratum 764369
affecting Cortex-A9 MPCore with two or more processors (all
current revisions). Under certain timing circumstances, a data
cache line maintenance operation by MVA targeting an Inner
Shareable memory region may fail to proceed up to either the
Point of Coherency or to the Point of Unification of the
system. This workaround adds a DSB instruction before the
relevant cache maintenance functions and sets a specific bit
in the diagnostic control register of the SCU.
endmenu
source "arch/arm/common/Kconfig"


@ -25,17 +25,17 @@
#ifdef CONFIG_SMP
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
smp_mb(); \
__asm__ __volatile__( \
"1: ldrex %1, [%2]\n" \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
"2: strex %1, %0, [%2]\n" \
" teq %1, #0\n" \
"2: strex %2, %0, [%3]\n" \
" teq %2, #0\n" \
" bne 1b\n" \
" mov %0, #0\n" \
__futex_atomic_ex_table("%4") \
: "=&r" (ret), "=&r" (oldval) \
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <linux/preempt.h>
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
__asm__ __volatile__( \
"1: " T(ldr) " %1, [%2]\n" \
"1: " T(ldr) " %1, [%3]\n" \
" " insn "\n" \
"2: " T(str) " %0, [%2]\n" \
"2: " T(str) " %0, [%3]\n" \
" mov %0, #0\n" \
__futex_atomic_ex_table("%4") \
: "=&r" (ret), "=&r" (oldval) \
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
int oldval = 0, ret, tmp;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
__futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
__futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg);
__futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg);
__futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg);
__futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
default:
ret = -ENOSYS;


@ -478,8 +478,8 @@
/*
* Unimplemented (or alternatively implemented) syscalls
*/
#define __IGNORE_fadvise64_64 1
#define __IGNORE_migrate_pages 1
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_UNISTD_H */


@ -13,6 +13,7 @@
#include <asm/smp_scu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#define SCU_CTRL 0x00
#define SCU_CONFIG 0x04
@ -37,6 +38,15 @@ void __init scu_enable(void __iomem *scu_base)
{
u32 scu_ctrl;
#ifdef CONFIG_ARM_ERRATA_764369
/* Cortex-A9 only */
if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
scu_ctrl = __raw_readl(scu_base + 0x30);
if (!(scu_ctrl & 1))
__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
}
#endif
scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
/* already enabled? */
if (scu_ctrl & 1)


@ -23,8 +23,10 @@
#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
#define ARM_EXIT_KEEP(x) x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x) x
#endif
OUTPUT_ARCH(arm)
@ -39,6 +41,11 @@ jiffies = jiffies_64 + 4;
SECTIONS
{
/*
* XXX: The linker does not define how output sections are
* assigned to input sections when there are multiple statements
* matching the same input section name. There is no documented
* order of matching.
*
* unwind exit sections must be discarded before the rest of the
* unwind sections get included.
*/
@ -47,6 +54,9 @@ SECTIONS
*(.ARM.extab.exit.text)
ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
ARM_EXIT_DISCARD(EXIT_TEXT)
ARM_EXIT_DISCARD(EXIT_DATA)
EXIT_CALL
#ifndef CONFIG_HOTPLUG
*(.ARM.exidx.devexit.text)
*(.ARM.extab.devexit.text)
@ -58,6 +68,8 @@ SECTIONS
#ifndef CONFIG_SMP_ON_UP
*(.alt.smp.init)
#endif
*(.discard)
*(.discard.*)
}
#ifdef CONFIG_XIP_KERNEL
@ -279,9 +291,6 @@ SECTIONS
STABS_DEBUG
.comment 0 : { *(.comment) }
/* Default discards */
DISCARDS
}
/*


@ -899,8 +899,7 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 28, .size = 4 },
}, {
.clk = {
.name = "sclk_cam",
.devname = "exynos4-fimc.0",
.name = "sclk_cam0",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 16),
},
@ -909,8 +908,7 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 16, .size = 4 },
}, {
.clk = {
.name = "sclk_cam",
.devname = "exynos4-fimc.1",
.name = "sclk_cam1",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 20),
},


@ -128,7 +128,7 @@ static int s3c2443_armclk_setrate(struct clk *clk, unsigned long rate)
unsigned long clkcon0;
clkcon0 = __raw_readl(S3C2443_CLKDIV0);
clkcon0 &= S3C2443_CLKDIV0_ARMDIV_MASK;
clkcon0 &= ~S3C2443_CLKDIV0_ARMDIV_MASK;
clkcon0 |= val << S3C2443_CLKDIV0_ARMDIV_SHIFT;
__raw_writel(clkcon0, S3C2443_CLKDIV0);
}


@ -815,8 +815,7 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 },
}, {
.clk = {
.name = "sclk_cam",
.devname = "s5pv210-fimc.0",
.name = "sclk_cam0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 3),
},
@ -825,8 +824,7 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 },
}, {
.clk = {
.name = "sclk_cam",
.devname = "s5pv210-fimc.1",
.name = "sclk_cam1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 4),
},


@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r12, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
ALT_SMP(W(dsb))
ALT_UP(W(nop))
#endif
1:
USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification
add r12, r12, r2
@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area)
add r1, r0, r1
sub r3, r2, #1
bic r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
ALT_SMP(W(dsb))
ALT_UP(W(nop))
#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
add r0, r0, r2
@ -247,6 +255,10 @@ v7_dma_inv_range:
sub r3, r2, #1
tst r0, r3
bic r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
ALT_SMP(W(dsb))
ALT_UP(W(nop))
#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
tst r1, r3
@ -270,6 +282,10 @@ v7_dma_clean_range:
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
ALT_SMP(W(dsb))
ALT_UP(W(nop))
#endif
1:
mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
add r0, r0, r2
@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
ALT_SMP(W(dsb))
ALT_UP(W(nop))
#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
add r0, r0, r2


@ -324,6 +324,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
else
__dma_free_buffer(page, size);
return addr;
}


@ -114,17 +114,18 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
{
static int used_gpioint_groups = 0;
int group = chip->group;
struct s5p_gpioint_bank *bank = NULL;
struct s5p_gpioint_bank *b, *bank = NULL;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT)
return -ENOMEM;
list_for_each_entry(bank, &banks, list) {
if (group >= bank->start &&
group < bank->start + bank->nr_groups)
list_for_each_entry(b, &banks, list) {
if (group >= b->start && group < b->start + b->nr_groups) {
bank = b;
break;
}
}
if (!bank)
return -EINVAL;


@ -561,6 +561,20 @@ static struct pci_ops u4_pcie_pci_ops =
.write = u4_pcie_write_config,
};
static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
{
/* Apple's device-tree "hides" the root complex virtual P2P bridge
* on U4. However, Linux sees it, causing the PCI <-> OF matching
* code to fail to properly match devices below it. This works around
* it by setting the node of the bridge to point to the PHB node,
* which is not entirely correct but fixes the matching code and
* doesn't break anything else. It's also the simplest possible fix.
*/
if (dev->dev.of_node == NULL)
dev->dev.of_node = pcibios_get_phb_of_node(dev->bus);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node);
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32


@ -188,7 +188,8 @@ extern char elf_platform[];
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX); \
set_personality(PER_LINUX | \
(current->personality & ~PER_MASK)); \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
set_thread_flag(TIF_31BIT); \
else \


@ -658,12 +658,14 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
* struct gmap_struct - guest address space
* @mm: pointer to the parent mm_struct
* @table: pointer to the page directory
* @asce: address space control element for gmap page table
* @crst_list: list of all crst tables used in the guest address space
*/
struct gmap {
struct list_head list;
struct mm_struct *mm;
unsigned long *table;
unsigned long asce;
struct list_head crst_list;
};


@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <asm/vdso.h>
#include <asm/sigp.h>
#include <asm/pgtable.h>
/*
* Make sure that the compiler is new enough. We want a compiler that
@ -126,6 +127,7 @@ int main(void)
DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
@ -151,6 +153,7 @@ int main(void)
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
#endif /* CONFIG_32BIT */
return 0;
}


@ -1076,6 +1076,11 @@ sie_loop:
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
tm __TI_flags+7(%r14),_TIF_EXIT_SIE
jnz sie_exit
lg %r14,__LC_GMAP # get gmap pointer
ltgr %r14,%r14
jz sie_gmap
lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
sie_gmap:
lg %r14,__SF_EMPTY(%r15) # get control block pointer
SPP __SF_EMPTY(%r15) # set guest id
sie 0(%r14)
@ -1083,6 +1088,7 @@ sie_done:
SPP __LC_CMF_HPP # set host id
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
sie_exit:
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
stmg %r0,%r13,0(%r14) # save guest gprs 0-13


@ -123,6 +123,7 @@ int kvm_dev_ioctl_check_extension(long ext)
switch (ext) {
case KVM_CAP_S390_PSW:
case KVM_CAP_S390_GMAP:
r = 1;
break;
default:
@ -263,10 +264,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
restore_fp_regs(&vcpu->arch.guest_fpregs);
restore_access_regs(vcpu->arch.guest_acrs);
gmap_enable(vcpu->arch.gmap);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
gmap_disable(vcpu->arch.gmap);
save_fp_regs(&vcpu->arch.guest_fpregs);
save_access_regs(vcpu->arch.guest_acrs);
restore_fp_regs(&vcpu->arch.host_fpregs);
@ -461,7 +464,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
local_irq_disable();
kvm_guest_enter();
local_irq_enable();
gmap_enable(vcpu->arch.gmap);
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@ -470,7 +472,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
}
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
gmap_disable(vcpu->arch.gmap);
local_irq_disable();
kvm_guest_exit();
local_irq_enable();


@ -160,6 +160,8 @@ struct gmap *gmap_alloc(struct mm_struct *mm)
table = (unsigned long *) page_to_phys(page);
crst_table_init(table, _REGION1_ENTRY_EMPTY);
gmap->table = table;
gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | __pa(table);
list_add(&gmap->list, &mm->context.gmap_list);
return gmap;
@ -240,10 +242,6 @@ EXPORT_SYMBOL_GPL(gmap_free);
*/
void gmap_enable(struct gmap *gmap)
{
/* Load primary space page table origin. */
S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | __pa(gmap->table);
asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
@ -254,10 +252,6 @@ EXPORT_SYMBOL_GPL(gmap_enable);
*/
void gmap_disable(struct gmap *gmap)
{
/* Load primary space page table origin. */
S390_lowcore.user_asce =
gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
@ -309,15 +303,15 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
/* Walk the guest addr space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff);
if (*table & _REGION_ENTRY_INV)
return 0;
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 42) & 0x7ff);
if (*table & _REGION_ENTRY_INV)
return 0;
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 31) & 0x7ff);
if (*table & _REGION_ENTRY_INV)
return 0;
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 20) & 0x7ff);
@ -325,6 +319,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
flush |= gmap_unlink_segment(gmap, table);
*table = _SEGMENT_ENTRY_INV;
}
out:
up_read(&gmap->mm->mmap_sem);
if (flush)
gmap_flush_tlb(gmap);


@ -43,6 +43,8 @@
#define SUN4V_CHIP_NIAGARA1 0x01
#define SUN4V_CHIP_NIAGARA2 0x02
#define SUN4V_CHIP_NIAGARA3 0x03
#define SUN4V_CHIP_NIAGARA4 0x04
#define SUN4V_CHIP_NIAGARA5 0x05
#define SUN4V_CHIP_UNKNOWN 0xff
#ifndef __ASSEMBLY__


@ -66,6 +66,8 @@ static struct xor_block_template xor_block_niagara = {
((tlb_type == hypervisor && \
(sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \
sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \
sun4v_chip_type == SUN4V_CHIP_NIAGARA3)) ? \
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || \
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || \
sun4v_chip_type == SUN4V_CHIP_NIAGARA5)) ? \
&xor_block_niagara : \
&xor_block_VIS)


@ -481,6 +481,18 @@ static void __init sun4v_cpu_probe(void)
sparc_pmu_type = "niagara3";
break;
case SUN4V_CHIP_NIAGARA4:
sparc_cpu_type = "UltraSparc T4 (Niagara4)";
sparc_fpu_type = "UltraSparc T4 integrated FPU";
sparc_pmu_type = "niagara4";
break;
case SUN4V_CHIP_NIAGARA5:
sparc_cpu_type = "UltraSparc T5 (Niagara5)";
sparc_fpu_type = "UltraSparc T5 integrated FPU";
sparc_pmu_type = "niagara5";
break;
default:
printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
prom_cpu_compatible);


@ -325,6 +325,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
case SUN4V_CHIP_NIAGARA1:
case SUN4V_CHIP_NIAGARA2:
case SUN4V_CHIP_NIAGARA3:
case SUN4V_CHIP_NIAGARA4:
case SUN4V_CHIP_NIAGARA5:
rover_inc_table = niagara_iterate_method;
break;
default:


@ -133,7 +133,7 @@ prom_sun4v_name:
prom_niagara_prefix:
.asciz "SUNW,UltraSPARC-T"
prom_sparc_prefix:
.asciz "SPARC-T"
.asciz "SPARC-"
.align 4
prom_root_compatible:
.skip 64
@ -396,7 +396,7 @@ sun4v_chip_type:
or %g1, %lo(prom_cpu_compatible), %g1
sethi %hi(prom_sparc_prefix), %g7
or %g7, %lo(prom_sparc_prefix), %g7
mov 7, %g3
mov 6, %g3
90: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
@ -408,10 +408,23 @@ sun4v_chip_type:
sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
ldub [%g1 + 7], %g2
ldub [%g1 + 6], %g2
cmp %g2, 'T'
be,pt %xcc, 70f
cmp %g2, 'M'
bne,pn %xcc, 4f
nop
70: ldub [%g1 + 7], %g2
cmp %g2, '3'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA3, %g4
cmp %g2, '4'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA4, %g4
cmp %g2, '5'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA5, %g4
ba,pt %xcc, 4f
nop
@ -543,6 +556,12 @@ niagara_tlb_fixup:
be,pt %xcc, niagara2_patch
nop
cmp %g1, SUN4V_CHIP_NIAGARA3
be,pt %xcc, niagara2_patch
nop
cmp %g1, SUN4V_CHIP_NIAGARA4
be,pt %xcc, niagara2_patch
nop
cmp %g1, SUN4V_CHIP_NIAGARA5
be,pt %xcc, niagara2_patch
nop


@ -380,8 +380,7 @@ void flush_thread(void)
#endif
}
/* Now, this task is no longer a kernel thread. */
current->thread.current_ds = USER_DS;
/* This task is no longer a kernel thread. */
if (current->thread.flags & SPARC_FLAG_KTHREAD) {
current->thread.flags &= ~SPARC_FLAG_KTHREAD;


@ -368,9 +368,6 @@ void flush_thread(void)
/* Clear FPU register state. */
t->fpsaved[0] = 0;
if (get_thread_current_ds() != ASI_AIUS)
set_fs(USER_DS);
}
/* It's a bit more tricky when 64-bit tasks are involved... */


@ -137,7 +137,7 @@ static void __init process_switch(char c)
prom_halt();
break;
case 'p':
/* Just ignore, this behavior is now the default. */
prom_early_console.flags &= ~CON_BOOT;
break;
default:
printk("Unknown boot switch (-%c)\n", c);


@ -106,7 +106,7 @@ static void __init process_switch(char c)
prom_halt();
break;
case 'p':
/* Just ignore, this behavior is now the default. */
prom_early_console.flags &= ~CON_BOOT;
break;
case 'P':
/* Force UltraSPARC-III P-Cache on. */
@ -425,10 +425,14 @@ static void __init init_sparc64_elf_hwcap(void)
else if (tlb_type == hypervisor) {
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
cap |= HWCAP_SPARC_BLKINIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
cap |= HWCAP_SPARC_N2;
}
@ -452,11 +456,15 @@ static void __init init_sparc64_elf_hwcap(void)
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
cap |= AV_SPARC_ASI_BLK_INIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
AV_SPARC_ASI_BLK_INIT |
AV_SPARC_POPC);
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
AV_SPARC_FMAF);
}


@ -511,6 +511,11 @@ static void __init read_obp_translations(void)
for (i = 0; i < prom_trans_ents; i++)
prom_trans[i].data &= ~0x0003fe0000000000UL;
}
/* Force execute bit on. */
for (i = 0; i < prom_trans_ents; i++)
prom_trans[i].data |= (tlb_type == hypervisor ?
_PAGE_EXEC_4V : _PAGE_EXEC_4U);
}
static void __init hypervisor_tlb_lock(unsigned long vaddr,


@ -22,25 +22,23 @@ void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
#endif
/*
* Define some priorities for the nmi notifier call chain.
*
* Create a local nmi bit that has a higher priority than
* external nmis, because the local ones are more frequent.
*
* Also setup some default high/normal/low settings for
* subsystems to registers with. Using 4 bits to separate
* the priorities. This can go a lot higher if needed be.
*/
#define NMI_FLAG_FIRST 1
#define NMI_LOCAL_SHIFT 16 /* randomly picked */
#define NMI_LOCAL_BIT (1ULL << NMI_LOCAL_SHIFT)
#define NMI_HIGH_PRIOR (1ULL << 8)
#define NMI_NORMAL_PRIOR (1ULL << 4)
#define NMI_LOW_PRIOR (1ULL << 0)
#define NMI_LOCAL_HIGH_PRIOR (NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
#define NMI_LOCAL_NORMAL_PRIOR (NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
#define NMI_LOCAL_LOW_PRIOR (NMI_LOCAL_BIT | NMI_LOW_PRIOR)
enum {
NMI_LOCAL=0,
NMI_UNKNOWN,
NMI_MAX
};
#define NMI_DONE 0
#define NMI_HANDLED 1
typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
int register_nmi_handler(unsigned int, nmi_handler_t, unsigned long,
const char *);
void unregister_nmi_handler(unsigned int, const char *);
void stop_nmi(void);
void restart_nmi(void);
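For orientation while reading the conversions further down, a minimal sketch of a client of this new interface follows; the handler body and the "example" name are illustrative and not taken from any file in this merge.

#include <linux/init.h>
#include <asm/nmi.h>

/* Illustrative handler: claim the NMI only if our (hypothetical) device raised it. */
static int example_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	if (!example_device_raised_nmi())	/* hypothetical helper */
		return NMI_DONE;		/* not ours, let the next handler look */

	example_device_ack_nmi();		/* hypothetical helper */
	return NMI_HANDLED;
}

static int __init example_nmi_init(void)
{
	/* NMI_LOCAL handlers are consulted before the unknown-NMI fallback */
	return register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0, "example");
}

static void example_nmi_teardown(void)
{
	unregister_nmi_handler(NMI_LOCAL, "example");
}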


@ -29,6 +29,9 @@
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
#define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40)
#define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41)
#define AMD64_EVENTSEL_EVENT \
(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK \
@ -159,7 +162,19 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
); \
}
struct perf_guest_switch_msr {
unsigned msr;
u64 host, guest;
};
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
*nr = 0;
return NULL;
}
static inline void perf_events_lapic_init(void) { }
#endif
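A rough sketch of how a hypervisor-side caller might consume perf_guest_get_msrs() around guest entry and exit; the wrmsrl-based switching shown here is illustrative only, and the actual KVM wiring is not part of this merge.

#include <linux/types.h>
#include <asm/msr.h>		/* wrmsrl() */
#include <asm/perf_event.h>	/* perf_guest_get_msrs() */

/*
 * Sketch: load the guest view of the perf control MSRs before entering
 * the guest and restore the host view on exit.  A real implementation
 * may instead feed these host/guest pairs into an atomic MSR switch list.
 */
static void example_switch_perf_msrs(bool entering_guest)
{
	struct perf_guest_switch_msr *msrs;
	int i, nr;

	msrs = perf_guest_get_msrs(&nr);
	if (!msrs)
		return;

	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr,
		       entering_guest ? msrs[i].guest : msrs[i].host);
}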


@ -23,7 +23,7 @@ void machine_real_restart(unsigned int type);
#define MRR_BIOS 0
#define MRR_APM 1
typedef void (*nmi_shootdown_cb)(int, struct die_args*);
typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
void nmi_shootdown_cpus(nmi_shootdown_cb callback);
#endif /* _ASM_X86_REBOOT_H */


@ -19,7 +19,7 @@ endif
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o
obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o


@ -60,22 +60,10 @@ void arch_trigger_all_cpu_backtrace(void)
}
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
struct die_args *args = __args;
struct pt_regs *regs;
int cpu;
switch (cmd) {
case DIE_NMI:
break;
default:
return NOTIFY_DONE;
}
regs = args->regs;
cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
@ -86,21 +74,16 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
show_regs(regs);
arch_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return NOTIFY_STOP;
return NMI_HANDLED;
}
return NOTIFY_DONE;
return NMI_DONE;
}
static __read_mostly struct notifier_block backtrace_notifier = {
.notifier_call = arch_trigger_all_cpu_backtrace_handler,
.next = NULL,
.priority = NMI_LOCAL_LOW_PRIOR,
};
static int __init register_trigger_all_cpu_backtrace(void)
{
register_die_notifier(&backtrace_notifier);
register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
0, "arch_bt");
return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);


@ -672,18 +672,11 @@ void __cpuinit uv_cpu_init(void)
/*
* When NMI is received, print a stack trace.
*/
int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
unsigned long real_uv_nmi;
int bid;
if (reason != DIE_NMIUNKNOWN)
return NOTIFY_OK;
if (in_crash_kexec)
/* do nothing if entering the crash kernel */
return NOTIFY_OK;
/*
* Each blade has an MMR that indicates when an NMI has been sent
* to cpus on the blade. If an NMI is detected, atomically
@ -704,7 +697,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
}
if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
return NOTIFY_DONE;
return NMI_DONE;
__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
@ -717,17 +710,12 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
dump_stack();
spin_unlock(&uv_nmi_lock);
return NOTIFY_STOP;
return NMI_HANDLED;
}
static struct notifier_block uv_dump_stack_nmi_nb = {
.notifier_call = uv_handle_nmi,
.priority = NMI_LOCAL_LOW_PRIOR - 1,
};
void uv_register_nmi_notifier(void)
{
if (register_die_notifier(&uv_dump_stack_nmi_nb))
if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
printk(KERN_WARNING "UV NMI handler failed to register\n");
}


@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
static cpumask_var_t mce_inject_cpumask;
static int mce_raise_notify(struct notifier_block *self,
unsigned long val, void *data)
static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
{
struct die_args *args = (struct die_args *)data;
int cpu = smp_processor_id();
struct mce *m = &__get_cpu_var(injectm);
if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
return NOTIFY_DONE;
if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
return NMI_DONE;
cpumask_clear_cpu(cpu, mce_inject_cpumask);
if (m->inject_flags & MCJ_EXCEPTION)
raise_exception(m, args->regs);
raise_exception(m, regs);
else if (m->status)
raise_poll(m);
return NOTIFY_STOP;
return NMI_HANDLED;
}
static struct notifier_block mce_raise_nb = {
.notifier_call = mce_raise_notify,
.priority = NMI_LOCAL_NORMAL_PRIOR,
};
/* Inject mce on current CPU */
static int raise_local(void)
{
@ -216,7 +209,8 @@ static int inject_init(void)
return -ENOMEM;
printk(KERN_INFO "Machine check injector initialized\n");
mce_chrdev_ops.write = mce_write;
register_die_notifier(&mce_raise_nb);
register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
"mce_notify");
return 0;
}


@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
percpu_inc(mce_exception_count);
if (notify_die(DIE_NMI, "machine check", regs, error_code,
18, SIGKILL) == NOTIFY_STOP)
goto out;
if (!banks)
goto out;
@ -1140,6 +1137,15 @@ static void mce_start_timer(unsigned long data)
add_timer_on(t, smp_processor_id());
}
/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
int cpu;
for_each_online_cpu(cpu)
del_timer_sync(&per_cpu(mce_timer, cpu));
}
static void mce_do_trigger(struct work_struct *work)
{
call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
@ -1750,7 +1756,6 @@ static struct syscore_ops mce_syscore_ops = {
static void mce_cpu_restart(void *data)
{
del_timer_sync(&__get_cpu_var(mce_timer));
if (!mce_available(__this_cpu_ptr(&cpu_info)))
return;
__mcheck_cpu_init_generic();
@ -1760,16 +1765,15 @@ static void mce_cpu_restart(void *data)
/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
mce_timer_delete_all();
on_each_cpu(mce_cpu_restart, NULL, 1);
}
/* Toggle features for corrected errors */
static void mce_disable_ce(void *all)
static void mce_disable_cmci(void *data)
{
if (!mce_available(__this_cpu_ptr(&cpu_info)))
return;
if (all)
del_timer_sync(&__get_cpu_var(mce_timer));
cmci_clear();
}
@ -1852,7 +1856,8 @@ static ssize_t set_ignore_ce(struct sys_device *s,
if (mce_ignore_ce ^ !!new) {
if (new) {
/* disable ce features */
on_each_cpu(mce_disable_ce, (void *)1, 1);
mce_timer_delete_all();
on_each_cpu(mce_disable_cmci, NULL, 1);
mce_ignore_ce = 1;
} else {
/* enable ce features */
@ -1875,7 +1880,7 @@ static ssize_t set_cmci_disabled(struct sys_device *s,
if (mce_cmci_disabled ^ !!new) {
if (new) {
/* disable cmci */
on_each_cpu(mce_disable_ce, NULL, 1);
on_each_cpu(mce_disable_cmci, NULL, 1);
mce_cmci_disabled = 1;
} else {
/* enable cmci */


@ -1058,76 +1058,15 @@ void perf_events_lapic_init(void)
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
struct pmu_nmi_state {
unsigned int marked;
int handled;
};
static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
struct die_args *args = __args;
unsigned int this_nmi;
int handled;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
return NMI_DONE;
switch (cmd) {
case DIE_NMI:
break;
case DIE_NMIUNKNOWN:
this_nmi = percpu_read(irq_stat.__nmi_count);
if (this_nmi != __this_cpu_read(pmu_nmi.marked))
/* let the kernel handle the unknown nmi */
return NOTIFY_DONE;
/*
* This one is a PMU back-to-back nmi. Two events
* trigger 'simultaneously' raising two back-to-back
* NMIs. If the first NMI handles both, the latter
* will be empty and daze the CPU. So, we drop it to
* avoid false-positive 'unknown nmi' messages.
*/
return NOTIFY_STOP;
default:
return NOTIFY_DONE;
}
handled = x86_pmu.handle_irq(args->regs);
if (!handled)
return NOTIFY_DONE;
this_nmi = percpu_read(irq_stat.__nmi_count);
if ((handled > 1) ||
/* the next nmi could be a back-to-back nmi */
((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
(__this_cpu_read(pmu_nmi.handled) > 1))) {
/*
* We could have two subsequent back-to-back nmis: The
* first handles more than one counter, the 2nd
* handles only one counter and the 3rd handles no
* counter.
*
* This is the 2nd nmi because the previous was
* handling more than one counter. We will mark the
* next (3rd) and then drop it if unhandled.
*/
__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
__this_cpu_write(pmu_nmi.handled, handled);
}
return NOTIFY_STOP;
return x86_pmu.handle_irq(regs);
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.notifier_call = perf_event_nmi_handler,
.next = NULL,
.priority = NMI_LOCAL_LOW_PRIOR,
};
struct event_constraint emptyconstraint;
struct event_constraint unconstrained;
@ -1232,7 +1171,7 @@ static int __init init_hw_perf_events(void)
((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
perf_events_lapic_init();
register_die_notifier(&perf_event_nmi_notifier);
register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,


@ -130,6 +130,13 @@ struct cpu_hw_events {
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
/*
* Intel host/guest exclude bits
*/
u64 intel_ctrl_guest_mask;
u64 intel_ctrl_host_mask;
struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];
/*
* manage shared (per-core, per-cpu) registers
* used on Intel NHM/WSM/SNB
@ -295,6 +302,11 @@ struct x86_pmu {
*/
struct extra_reg *extra_regs;
unsigned int er_flags;
/*
* Intel host/guest support (KVM)
*/
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};
#define ERF_NO_HT_SHARING 1


@ -138,6 +138,19 @@ static int amd_pmu_hw_config(struct perf_event *event)
if (ret)
return ret;
if (event->attr.exclude_host && event->attr.exclude_guest)
/*
* When HO == GO == 1 the hardware treats that as GO == HO == 0
* and will count in both modes. We don't want to count in that
* case so we emulate no-counting by setting US = OS = 0.
*/
event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
ARCH_PERFMON_EVENTSEL_OS);
else if (event->attr.exclude_host)
event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
else if (event->attr.exclude_guest)
event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
if (event->attr.type != PERF_TYPE_RAW)
return 0;
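The exclude_guest/exclude_host attributes tested above come from the perf_event_attr that user space passes to perf_event_open(); a minimal, purely illustrative sketch of requesting a host-only cycle counter:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Count CPU cycles for the calling task, excluding time spent in a guest. */
static int open_host_only_cycles(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_guest = 1;	/* maps to the "host only" event-select bit above */

	/* pid = 0 (current task), cpu = -1 (any), no group fd, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}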


@ -749,7 +749,8 @@ static void intel_pmu_enable_all(int added)
intel_pmu_pebs_enable_all();
intel_pmu_lbr_enable_all();
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
struct perf_event *event =
@ -872,6 +873,7 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
@ -879,6 +881,9 @@ static void intel_pmu_disable_event(struct perf_event *event)
return;
}
cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc);
return;
@ -924,6 +929,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
static void intel_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
if (!__this_cpu_read(cpu_hw_events.enabled))
@ -933,6 +939,11 @@ static void intel_pmu_enable_event(struct perf_event *event)
return;
}
if (event->attr.exclude_host)
cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
if (event->attr.exclude_guest)
cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_enable_fixed(hwc);
return;
@ -1302,12 +1313,84 @@ static int intel_pmu_hw_config(struct perf_event *event)
return 0;
}
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
if (x86_pmu.guest_get_msrs)
return x86_pmu.guest_get_msrs(nr);
*nr = 0;
return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
*nr = 1;
return arr;
}
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
arr[idx].msr = x86_pmu_config_addr(idx);
arr[idx].host = arr[idx].guest = 0;
if (!test_bit(idx, cpuc->active_mask))
continue;
arr[idx].host = arr[idx].guest =
event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
if (event->attr.exclude_host)
arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
else if (event->attr.exclude_guest)
arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
}
*nr = x86_pmu.num_counters;
return arr;
}
static void core_pmu_enable_event(struct perf_event *event)
{
if (!event->attr.exclude_host)
x86_pmu_enable_event(event);
}
static void core_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask) ||
cpuc->events[idx]->attr.exclude_host)
continue;
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
}
static __initconst const struct x86_pmu core_pmu = {
.name = "core",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.enable_all = core_pmu_enable_all,
.enable = core_pmu_enable_event,
.disable = x86_pmu_disable_event,
.hw_config = x86_pmu_hw_config,
.schedule_events = x86_schedule_events,
@ -1325,6 +1408,7 @@ static __initconst const struct x86_pmu core_pmu = {
.get_event_constraints = intel_get_event_constraints,
.put_event_constraints = intel_put_event_constraints,
.event_constraints = intel_core_event_constraints,
.guest_get_msrs = core_guest_get_msrs,
};
struct intel_shared_regs *allocate_shared_regs(int cpu)
@ -1431,6 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
.guest_get_msrs = intel_guest_get_msrs,
};
static void intel_clovertown_quirks(void)


@ -32,15 +32,12 @@ int in_crash_kexec;
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static void kdump_nmi_callback(int cpu, struct die_args *args)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
struct pt_regs *regs;
#ifdef CONFIG_X86_32
struct pt_regs fixed_regs;
#endif
regs = args->regs;
#ifdef CONFIG_X86_32
if (!user_mode_vm(regs)) {
crash_fixup_ss_esp(&fixed_regs, regs);


@ -511,28 +511,37 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)
static int was_in_debug_nmi[NR_CPUS];
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
struct pt_regs *regs = args->regs;
switch (cmd) {
case DIE_NMI:
case NMI_LOCAL:
if (atomic_read(&kgdb_active) != -1) {
/* KGDB CPU roundup */
kgdb_nmicallback(raw_smp_processor_id(), regs);
was_in_debug_nmi[raw_smp_processor_id()] = 1;
touch_nmi_watchdog();
return NOTIFY_STOP;
return NMI_HANDLED;
}
return NOTIFY_DONE;
break;
case DIE_NMIUNKNOWN:
case NMI_UNKNOWN:
if (was_in_debug_nmi[raw_smp_processor_id()]) {
was_in_debug_nmi[raw_smp_processor_id()] = 0;
return NOTIFY_STOP;
return NMI_HANDLED;
}
return NOTIFY_DONE;
break;
default:
/* do nothing */
break;
}
return NMI_DONE;
}
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;
switch (cmd) {
case DIE_DEBUG:
if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
if (user_mode(regs))
@ -590,11 +599,6 @@ kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
/*
* Lowest-prio notifier priority, we want to be notified last:
*/
.priority = NMI_LOCAL_LOW_PRIOR,
};
/**
@ -605,7 +609,31 @@ static struct notifier_block kgdb_notifier = {
*/
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
int retval;
retval = register_die_notifier(&kgdb_notifier);
if (retval)
goto out;
retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
0, "kgdb");
if (retval)
goto out1;
retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
0, "kgdb");
if (retval)
goto out2;
return retval;
out2:
unregister_nmi_handler(NMI_LOCAL, "kgdb");
out1:
unregister_die_notifier(&kgdb_notifier);
out:
return retval;
}
static void kgdb_hw_overflow_handler(struct perf_event *event,
@ -673,6 +701,8 @@ void kgdb_arch_exit(void)
breakinfo[i].pev = NULL;
}
}
unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
unregister_nmi_handler(NMI_LOCAL, "kgdb");
unregister_die_notifier(&kgdb_notifier);
}

arch/x86/kernel/nmi.c (new file, 336 lines)

@ -0,0 +1,336 @@
/*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
* Copyright (C) 2011 Don Zickus Red Hat, Inc.
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*/
/*
* Handle hardware traps and faults.
*/
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#define NMI_MAX_NAMELEN 16
struct nmiaction {
struct list_head list;
nmi_handler_t handler;
unsigned int flags;
char *name;
};
struct nmi_desc {
spinlock_t lock;
struct list_head head;
};
static struct nmi_desc nmi_desc[NMI_MAX] =
{
{
.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
.head = LIST_HEAD_INIT(nmi_desc[0].head),
},
{
.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
.head = LIST_HEAD_INIT(nmi_desc[1].head),
},
};
static int ignore_nmis;
int unknown_nmi_panic;
/*
* Prevent NMI reason port (0x61) being accessed simultaneously, can
* only be used in NMI handler.
*/
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
static int __init setup_unknown_nmi_panic(char *str)
{
unknown_nmi_panic = 1;
return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
#define nmi_to_desc(type) (&nmi_desc[type])
static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs)
{
struct nmi_desc *desc = nmi_to_desc(type);
struct nmiaction *a;
int handled=0;
rcu_read_lock();
/*
* NMIs are edge-triggered, which means if you have enough
* of them concurrently, you can lose some because only one
* can be latched at any given time. Walk the whole list
* to handle those situations.
*/
list_for_each_entry_rcu(a, &desc->head, list) {
handled += a->handler(type, regs);
}
rcu_read_unlock();
/* return total number of NMI events handled */
return handled;
}
static int __setup_nmi(unsigned int type, struct nmiaction *action)
{
struct nmi_desc *desc = nmi_to_desc(type);
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
/*
* some handlers need to be executed first otherwise a fake
* event confuses some handlers (kdump uses this flag)
*/
if (action->flags & NMI_FLAG_FIRST)
list_add_rcu(&action->list, &desc->head);
else
list_add_tail_rcu(&action->list, &desc->head);
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
static struct nmiaction *__free_nmi(unsigned int type, const char *name)
{
struct nmi_desc *desc = nmi_to_desc(type);
struct nmiaction *n;
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
list_for_each_entry_rcu(n, &desc->head, list) {
/*
* the name passed in to describe the nmi handler
* is used as the lookup key
*/
if (!strcmp(n->name, name)) {
WARN(in_nmi(),
"Trying to free NMI (%s) from NMI context!\n", n->name);
list_del_rcu(&n->list);
break;
}
}
spin_unlock_irqrestore(&desc->lock, flags);
synchronize_rcu();
return (n);
}
int register_nmi_handler(unsigned int type, nmi_handler_t handler,
unsigned long nmiflags, const char *devname)
{
struct nmiaction *action;
int retval = -ENOMEM;
if (!handler)
return -EINVAL;
action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
if (!action)
goto fail_action;
action->handler = handler;
action->flags = nmiflags;
action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
if (!action->name)
goto fail_action_name;
retval = __setup_nmi(type, action);
if (retval)
goto fail_setup_nmi;
return retval;
fail_setup_nmi:
kfree(action->name);
fail_action_name:
kfree(action);
fail_action:
return retval;
}
EXPORT_SYMBOL_GPL(register_nmi_handler);
void unregister_nmi_handler(unsigned int type, const char *name)
{
struct nmiaction *a;
a = __free_nmi(type, name);
if (a) {
kfree(a->name);
kfree(a);
}
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
reason, smp_processor_id());
/*
* On some machines, PCI SERR line is used to report memory
* errors. EDAC makes use of it.
*/
#if defined(CONFIG_EDAC)
if (edac_handler_set()) {
edac_atomic_assert_error();
return;
}
#endif
if (panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
pr_emerg("Dazed and confused, but trying to continue\n");
/* Clear and disable the PCI SERR error line. */
reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
unsigned long i;
pr_emerg(
"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
reason, smp_processor_id());
show_registers(regs);
if (panic_on_io_nmi)
panic("NMI IOCK error: Not continuing");
/* Re-enable the IOCK line, wait for a few seconds */
reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
outb(reason, NMI_REASON_PORT);
i = 20000;
while (--i) {
touch_nmi_watchdog();
udelay(100);
}
reason &= ~NMI_REASON_CLEAR_IOCHK;
outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
int handled;
handled = nmi_handle(NMI_UNKNOWN, regs);
if (handled)
return;
#ifdef CONFIG_MCA
/*
* Might actually be able to figure out what the guilty party
* is:
*/
if (MCA_bus) {
mca_handle_nmi();
return;
}
#endif
pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
reason, smp_processor_id());
pr_emerg("Do you have a strange power saving mode enabled?\n");
if (unknown_nmi_panic || panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
pr_emerg("Dazed and confused, but trying to continue\n");
}
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
int handled;
/*
* CPU-specific NMI must be processed before non-CPU-specific
* NMI, otherwise we may lose it, because the CPU-specific
* NMI can not be detected/processed on other CPUs.
*/
handled = nmi_handle(NMI_LOCAL, regs);
if (handled)
return;
/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
raw_spin_lock(&nmi_reason_lock);
reason = get_nmi_reason();
if (reason & NMI_REASON_MASK) {
if (reason & NMI_REASON_SERR)
pci_serr_error(reason, regs);
else if (reason & NMI_REASON_IOCHK)
io_check_error(reason, regs);
#ifdef CONFIG_X86_32
/*
* Reassert NMI in case it became active
* meanwhile as it's edge-triggered:
*/
reassert_nmi();
#endif
raw_spin_unlock(&nmi_reason_lock);
return;
}
raw_spin_unlock(&nmi_reason_lock);
unknown_nmi_error(reason, regs);
}
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
nmi_enter();
inc_irq_stat(__nmi_count);
if (!ignore_nmis)
default_do_nmi(regs);
nmi_exit();
}
void stop_nmi(void)
{
ignore_nmis++;
}
void restart_nmi(void)
{
ignore_nmis--;
}


@ -464,7 +464,7 @@ static inline void kb_wait(void)
}
}
static void vmxoff_nmi(int cpu, struct die_args *args)
static void vmxoff_nmi(int cpu, struct pt_regs *regs)
{
cpu_emergency_vmxoff();
}
@ -736,14 +736,10 @@ static nmi_shootdown_cb shootdown_callback;
static atomic_t waiting_for_crash_ipi;
static int crash_nmi_callback(struct notifier_block *self,
unsigned long val, void *data)
static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
{
int cpu;
if (val != DIE_NMI)
return NOTIFY_OK;
cpu = raw_smp_processor_id();
/* Don't do anything if this handler is invoked on crashing cpu.
@ -751,10 +747,10 @@ static int crash_nmi_callback(struct notifier_block *self,
* an NMI if system was initially booted with nmi_watchdog parameter.
*/
if (cpu == crashing_cpu)
return NOTIFY_STOP;
return NMI_HANDLED;
local_irq_disable();
shootdown_callback(cpu, (struct die_args *)data);
shootdown_callback(cpu, regs);
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
@ -762,7 +758,7 @@ static int crash_nmi_callback(struct notifier_block *self,
for (;;)
cpu_relax();
return 1;
return NMI_HANDLED;
}
static void smp_send_nmi_allbutself(void)
@ -770,12 +766,6 @@ static void smp_send_nmi_allbutself(void)
apic->send_IPI_allbutself(NMI_VECTOR);
}
static struct notifier_block crash_nmi_nb = {
.notifier_call = crash_nmi_callback,
/* we want to be the first one called */
.priority = NMI_LOCAL_HIGH_PRIOR+1,
};
/* Halt all other CPUs, calling the specified function on each of them
*
* This function can be used to halt all other CPUs on crash
@ -794,7 +784,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
/* Would it be better to replace the trap vector here? */
if (register_die_notifier(&crash_nmi_nb))
if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
NMI_FLAG_FIRST, "crash"))
return; /* return what? */
/* Ensure the new callback function is set before sending
* out the NMI


@ -42,8 +42,11 @@ int mach_set_rtc_mmss(unsigned long nowtime)
{
int real_seconds, real_minutes, cmos_minutes;
unsigned char save_control, save_freq_select;
unsigned long flags;
int retval = 0;
spin_lock_irqsave(&rtc_lock, flags);
/* tell the clock it's being set */
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
@ -93,12 +96,17 @@ int mach_set_rtc_mmss(unsigned long nowtime)
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
spin_unlock_irqrestore(&rtc_lock, flags);
return retval;
}
unsigned long mach_get_cmos_time(void)
{
unsigned int status, year, mon, day, hour, min, sec, century = 0;
unsigned long flags;
spin_lock_irqsave(&rtc_lock, flags);
/*
* If UIP is clear, then we have >= 244 microseconds before
@ -125,6 +133,8 @@ unsigned long mach_get_cmos_time(void)
status = CMOS_READ(RTC_CONTROL);
WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
spin_unlock_irqrestore(&rtc_lock, flags);
if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
sec = bcd2bin(sec);
min = bcd2bin(min);
@ -169,24 +179,15 @@ EXPORT_SYMBOL(rtc_cmos_write);
int update_persistent_clock(struct timespec now)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&rtc_lock, flags);
retval = x86_platform.set_wallclock(now.tv_sec);
spin_unlock_irqrestore(&rtc_lock, flags);
return retval;
return x86_platform.set_wallclock(now.tv_sec);
}
/* not static: needed by APM */
void read_persistent_clock(struct timespec *ts)
{
unsigned long retval, flags;
unsigned long retval;
spin_lock_irqsave(&rtc_lock, flags);
retval = x86_platform.get_wallclock();
spin_unlock_irqrestore(&rtc_lock, flags);
ts->tv_sec = retval;
ts->tv_nsec = 0;


@ -81,15 +81,6 @@ gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
static int ignore_nmis;
int unknown_nmi_panic;
/*
* Prevent NMI reason port (0x61) being accessed simultaneously, can
* only be used in NMI handler.
*/
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
static inline void conditional_sti(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
@ -307,152 +298,6 @@ gp_in_kernel:
die("general protection fault", regs, error_code);
}
static int __init setup_unknown_nmi_panic(char *str)
{
unknown_nmi_panic = 1;
return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
reason, smp_processor_id());
/*
* On some machines, PCI SERR line is used to report memory
* errors. EDAC makes use of it.
*/
#if defined(CONFIG_EDAC)
if (edac_handler_set()) {
edac_atomic_assert_error();
return;
}
#endif
if (panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
pr_emerg("Dazed and confused, but trying to continue\n");
/* Clear and disable the PCI SERR error line. */
reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
unsigned long i;
pr_emerg(
"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
reason, smp_processor_id());
show_registers(regs);
if (panic_on_io_nmi)
panic("NMI IOCK error: Not continuing");
/* Re-enable the IOCK line, wait for a few seconds */
reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
outb(reason, NMI_REASON_PORT);
i = 20000;
while (--i) {
touch_nmi_watchdog();
udelay(100);
}
reason &= ~NMI_REASON_CLEAR_IOCHK;
outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
NOTIFY_STOP)
return;
#ifdef CONFIG_MCA
/*
* Might actually be able to figure out what the guilty party
* is:
*/
if (MCA_bus) {
mca_handle_nmi();
return;
}
#endif
pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
reason, smp_processor_id());
pr_emerg("Do you have a strange power saving mode enabled?\n");
if (unknown_nmi_panic || panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
pr_emerg("Dazed and confused, but trying to continue\n");
}
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
/*
* CPU-specific NMI must be processed before non-CPU-specific
* NMI, otherwise we may lose it, because the CPU-specific
* NMI can not be detected/processed on other CPUs.
*/
if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
return;
/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
raw_spin_lock(&nmi_reason_lock);
reason = get_nmi_reason();
if (reason & NMI_REASON_MASK) {
if (reason & NMI_REASON_SERR)
pci_serr_error(reason, regs);
else if (reason & NMI_REASON_IOCHK)
io_check_error(reason, regs);
#ifdef CONFIG_X86_32
/*
* Reassert NMI in case it became active
* meanwhile as it's edge-triggered:
*/
reassert_nmi();
#endif
raw_spin_unlock(&nmi_reason_lock);
return;
}
raw_spin_unlock(&nmi_reason_lock);
unknown_nmi_error(reason, regs);
}
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
nmi_enter();
inc_irq_stat(__nmi_count);
if (!ignore_nmis)
default_do_nmi(regs);
nmi_exit();
}
void stop_nmi(void)
{
ignore_nmis++;
}
void restart_nmi(void)
{
ignore_nmis--;
}
/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{


@ -3603,7 +3603,7 @@ done_prefixes:
break;
case Src2CL:
ctxt->src2.bytes = 1;
ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0x8;
ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
break;
case Src2ImmByte:
rc = decode_imm(ctxt, &ctxt->src2, 1, true);
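
The Src2CL fix above matters because CL is the low byte of RCX, so the correct mask is 0xff; masking with 0x8 kept only a single bit of the shift count. A small standalone C illustration of the difference (the RCX value is invented):

#include <stdio.h>

int main(void)
{
        unsigned long rcx = 0x1234567f; /* made-up guest RCX */

        /* Old mask kept only bit 3, so most CL values collapsed to 0 or 8. */
        printf("rcx & 0x8  = %lu\n", rcx & 0x8);   /* prints 8 */
        /* CL is the low byte of RCX, so the whole byte must be kept. */
        printf("rcx & 0xff = %lu\n", rcx & 0xff);  /* prints 127 */
        return 0;
}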


@ -400,7 +400,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
/* xchg acts as a barrier before the setting of the high bits */
orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
orig.spte_high = ssptep->spte_high = sspte.spte_high;
orig.spte_high = ssptep->spte_high;
ssptep->spte_high = sspte.spte_high;
count_spte_clear(sptep, spte);
return orig.spte;
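
Splitting the chained assignment above is what lets orig capture the old high word before the new value is stored; with the chained form, orig ends up holding the new value instead. A tiny standalone C sketch of the difference, with made-up values:

#include <stdio.h>

int main(void)
{
        unsigned int cur = 0xaaaa, new_val = 0x5555, orig;

        /* Chained form: orig records the NEW value, the old one is lost. */
        orig = cur = new_val;
        printf("chained: orig=0x%x\n", orig);

        /* Split form: read the old value first, then store the new one. */
        cur = 0xaaaa;
        orig = cur;
        cur = new_val;
        printf("split:   orig=0x%x, cur=0x%x\n", orig, cur);
        return 0;
}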


@ -61,26 +61,15 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
}
static int profile_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
switch (val) {
case DIE_NMI:
if (ctr_running)
model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
else if (!nmi_enabled)
break;
else
model->stop(&__get_cpu_var(cpu_msrs));
ret = NOTIFY_STOP;
break;
default:
break;
}
return ret;
if (ctr_running)
model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
else if (!nmi_enabled)
return NMI_DONE;
else
model->stop(&__get_cpu_var(cpu_msrs));
return NMI_HANDLED;
}
static void nmi_cpu_save_registers(struct op_msrs *msrs)
@ -363,12 +352,6 @@ static void nmi_cpu_setup(void *dummy)
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static struct notifier_block profile_exceptions_nb = {
.notifier_call = profile_exceptions_notify,
.next = NULL,
.priority = NMI_LOCAL_LOW_PRIOR,
};
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
struct op_msr *counters = msrs->counters;
@ -508,7 +491,8 @@ static int nmi_setup(void)
ctr_running = 0;
/* make variables visible to the nmi handler: */
smp_mb();
err = register_die_notifier(&profile_exceptions_nb);
err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
0, "oprofile");
if (err)
goto fail;
@ -538,7 +522,7 @@ static void nmi_shutdown(void)
put_online_cpus();
/* make variables visible to the nmi handler: */
smp_mb();
unregister_die_notifier(&profile_exceptions_nb);
unregister_nmi_handler(NMI_LOCAL, "oprofile");
msrs = &get_cpu_var(cpu_msrs);
model->shutdown(msrs);
free_msrs();


@ -58,8 +58,11 @@ EXPORT_SYMBOL_GPL(vrtc_cmos_write);
unsigned long vrtc_get_time(void)
{
u8 sec, min, hour, mday, mon;
unsigned long flags;
u32 year;
spin_lock_irqsave(&rtc_lock, flags);
while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
cpu_relax();
@ -70,6 +73,8 @@ unsigned long vrtc_get_time(void)
mon = vrtc_cmos_read(RTC_MONTH);
year = vrtc_cmos_read(RTC_YEAR);
spin_unlock_irqrestore(&rtc_lock, flags);
/* vRTC YEAR reg contains the offset to 1960 */
year += 1960;
@ -83,8 +88,10 @@ unsigned long vrtc_get_time(void)
int vrtc_set_mmss(unsigned long nowtime)
{
int real_sec, real_min;
unsigned long flags;
int vrtc_min;
spin_lock_irqsave(&rtc_lock, flags);
vrtc_min = vrtc_cmos_read(RTC_MINUTES);
real_sec = nowtime % 60;
@ -95,6 +102,8 @@ int vrtc_set_mmss(unsigned long nowtime)
vrtc_cmos_write(real_sec, RTC_SECONDS);
vrtc_cmos_write(real_min, RTC_MINUTES);
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}


@ -348,9 +348,10 @@ void blk_put_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_put_queue);
/*
* Note: If a driver supplied the queue lock, it should not zap that lock
* unexpectedly as some queue cleanup components like elevator_exit() and
* blk_throtl_exit() need queue lock.
* Note: If a driver supplied the queue lock, it is disconnected
* by this function. The actual state of the lock doesn't matter
* here as the request_queue isn't accessible after this point
* (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
*/
void blk_cleanup_queue(struct request_queue *q)
{
@ -367,10 +368,8 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
mutex_unlock(&q->sysfs_lock);
if (q->elevator)
elevator_exit(q->elevator);
blk_throtl_exit(q);
if (q->queue_lock != &q->__queue_lock)
q->queue_lock = &q->__queue_lock;
blk_put_queue(q);
}


@ -479,6 +479,11 @@ static void blk_release_queue(struct kobject *kobj)
blk_sync_queue(q);
if (q->elevator)
elevator_exit(q->elevator);
blk_throtl_exit(q);
if (rl->rq_pool)
mempool_destroy(rl->rq_pool);


@ -50,6 +50,7 @@
#include <acpi/hed.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
#include <asm/nmi.h>
#include "apei-internal.h"
@ -749,15 +750,11 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
}
}
static int ghes_notify_nmi(struct notifier_block *this,
unsigned long cmd, void *data)
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
struct ghes *ghes, *ghes_global = NULL;
int sev, sev_global = -1;
int ret = NOTIFY_DONE;
if (cmd != DIE_NMI)
return ret;
int ret = NMI_DONE;
raw_spin_lock(&ghes_nmi_lock);
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
@ -770,10 +767,10 @@ static int ghes_notify_nmi(struct notifier_block *this,
sev_global = sev;
ghes_global = ghes;
}
ret = NOTIFY_STOP;
ret = NMI_HANDLED;
}
if (ret == NOTIFY_DONE)
if (ret == NMI_DONE)
goto out;
if (sev_global >= GHES_SEV_PANIC) {
@ -825,10 +822,6 @@ static struct notifier_block ghes_notifier_sci = {
.notifier_call = ghes_notify_sci,
};
static struct notifier_block ghes_notifier_nmi = {
.notifier_call = ghes_notify_nmi,
};
static unsigned long ghes_esource_prealloc_size(
const struct acpi_hest_generic *generic)
{
@ -918,7 +911,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
ghes_estatus_pool_expand(len);
mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_nmi))
register_die_notifier(&ghes_notifier_nmi);
register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
"ghes");
list_add_rcu(&ghes->list, &ghes_nmi);
mutex_unlock(&ghes_list_mutex);
break;
@ -964,7 +958,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
mutex_lock(&ghes_list_mutex);
list_del_rcu(&ghes->list);
if (list_empty(&ghes_nmi))
unregister_die_notifier(&ghes_notifier_nmi);
unregister_nmi_handler(NMI_LOCAL, "ghes");
mutex_unlock(&ghes_list_mutex);
/*
* To synchronize with NMI handler, ghes can only be


@ -41,6 +41,22 @@ static struct pm_clk_data *__to_pcd(struct device *dev)
return dev ? dev->power.subsys_data : NULL;
}
/**
* pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
* @ce: PM clock entry corresponding to the clock.
*/
static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
{
ce->clk = clk_get(dev, ce->con_id);
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
} else {
ce->status = PCE_STATUS_ACQUIRED;
dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
}
}
/**
* pm_clk_add - Start using a device clock for power management.
* @dev: Device whose clock is going to be used for power management.
@ -73,6 +89,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
}
}
pm_clk_acquire(dev, ce);
spin_lock_irq(&pcd->lock);
list_add_tail(&ce->node, &pcd->clock_list);
spin_unlock_irq(&pcd->lock);
@ -82,17 +100,12 @@ int pm_clk_add(struct device *dev, const char *con_id)
/**
* __pm_clk_remove - Destroy PM clock entry.
* @ce: PM clock entry to destroy.
*
* This routine must be called under the spinlock protecting the PM list of
* clocks corresponding to the @ce's device.
*/
static void __pm_clk_remove(struct pm_clock_entry *ce)
{
if (!ce)
return;
list_del(&ce->node);
if (ce->status < PCE_STATUS_ERROR) {
if (ce->status == PCE_STATUS_ENABLED)
clk_disable(ce->clk);
@ -126,18 +139,22 @@ void pm_clk_remove(struct device *dev, const char *con_id)
spin_lock_irq(&pcd->lock);
list_for_each_entry(ce, &pcd->clock_list, node) {
if (!con_id && !ce->con_id) {
__pm_clk_remove(ce);
break;
} else if (!con_id || !ce->con_id) {
if (!con_id && !ce->con_id)
goto remove;
else if (!con_id || !ce->con_id)
continue;
} else if (!strcmp(con_id, ce->con_id)) {
__pm_clk_remove(ce);
break;
}
else if (!strcmp(con_id, ce->con_id))
goto remove;
}
spin_unlock_irq(&pcd->lock);
return;
remove:
list_del(&ce->node);
spin_unlock_irq(&pcd->lock);
__pm_clk_remove(ce);
}
/**
@ -175,43 +192,33 @@ void pm_clk_destroy(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce, *c;
struct list_head list;
if (!pcd)
return;
dev->power.subsys_data = NULL;
INIT_LIST_HEAD(&list);
spin_lock_irq(&pcd->lock);
list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
__pm_clk_remove(ce);
list_move(&ce->node, &list);
spin_unlock_irq(&pcd->lock);
kfree(pcd);
list_for_each_entry_safe_reverse(ce, c, &list, node) {
list_del(&ce->node);
__pm_clk_remove(ce);
}
}
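
The reworked pm_clk_remove()/pm_clk_destroy() above detach entries from the clock list while holding the spinlock and only call __pm_clk_remove(), which may sleep, after dropping it. A rough userspace analogue of that detach-under-lock, free-outside-lock pattern, using a pthread mutex and a hand-rolled singly linked list (all names below are invented for the sketch):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { struct entry *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

/* Detach the whole list under the lock, then free entries outside it. */
static void destroy_all(void)
{
        struct entry *local, *e;

        pthread_mutex_lock(&lock);
        local = head;
        head = NULL;
        pthread_mutex_unlock(&lock);

        while ((e = local) != NULL) {
                local = e->next;
                printf("freeing entry %d outside the lock\n", e->id);
                free(e);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct entry *e = malloc(sizeof(*e));
                if (!e)
                        break;
                e->id = i;
                e->next = head;
                head = e;
        }
        destroy_all();
        return 0;
}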
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
/**
* pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
* @con_id: Connection ID of the clock.
*/
static void pm_clk_acquire(struct device *dev,
struct pm_clock_entry *ce)
{
ce->clk = clk_get(dev, ce->con_id);
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
} else {
ce->status = PCE_STATUS_ACQUIRED;
dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
}
}
/**
* pm_clk_suspend - Disable clocks in a device's PM clock list.
* @dev: Device to disable the clocks for.
@ -230,9 +237,6 @@ int pm_clk_suspend(struct device *dev)
spin_lock_irqsave(&pcd->lock, flags);
list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
pm_clk_acquire(dev, ce);
if (ce->status < PCE_STATUS_ERROR) {
clk_disable(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
@ -262,9 +266,6 @@ int pm_clk_resume(struct device *dev)
spin_lock_irqsave(&pcd->lock, flags);
list_for_each_entry(ce, &pcd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
pm_clk_acquire(dev, ce);
if (ce->status < PCE_STATUS_ERROR) {
clk_enable(ce->clk);
ce->status = PCE_STATUS_ENABLED;


@ -65,6 +65,7 @@
* mechanism for it at that time.
*/
#include <asm/kdebug.h>
#include <asm/nmi.h>
#define HAVE_DIE_NMI
#endif
@ -1077,17 +1078,8 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
#ifdef HAVE_DIE_NMI
static int
ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
ipmi_nmi(unsigned int val, struct pt_regs *regs)
{
struct die_args *args = data;
if (val != DIE_NMIUNKNOWN)
return NOTIFY_OK;
/* Hack, if it's a memory or I/O error, ignore it. */
if (args->err & 0xc0)
return NOTIFY_OK;
/*
* If we get here, it's an NMI that's not a memory or I/O
* error. We can't truly tell if it's from IPMI or not
@ -1097,15 +1089,15 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
if (testing_nmi) {
testing_nmi = 2;
return NOTIFY_STOP;
return NMI_HANDLED;
}
/* If we are not expecting a timeout, ignore it. */
if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
return NOTIFY_OK;
return NMI_DONE;
if (preaction_val != WDOG_PRETIMEOUT_NMI)
return NOTIFY_OK;
return NMI_DONE;
/*
* If no one else handled the NMI, we assume it was the IPMI
@ -1120,12 +1112,8 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
panic(PFX "pre-timeout");
}
return NOTIFY_STOP;
return NMI_HANDLED;
}
static struct notifier_block ipmi_nmi_handler = {
.notifier_call = ipmi_nmi
};
#endif
static int wdog_reboot_handler(struct notifier_block *this,
@ -1290,7 +1278,8 @@ static void check_parms(void)
}
}
if (do_nmi && !nmi_handler_registered) {
rv = register_die_notifier(&ipmi_nmi_handler);
rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
"ipmi");
if (rv) {
printk(KERN_WARNING PFX
"Can't register nmi handler\n");
@ -1298,7 +1287,7 @@ static void check_parms(void)
} else
nmi_handler_registered = 1;
} else if (!do_nmi && nmi_handler_registered) {
unregister_die_notifier(&ipmi_nmi_handler);
unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
nmi_handler_registered = 0;
}
#endif
@ -1336,7 +1325,7 @@ static int __init ipmi_wdog_init(void)
if (rv) {
#ifdef HAVE_DIE_NMI
if (nmi_handler_registered)
unregister_die_notifier(&ipmi_nmi_handler);
unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
#endif
atomic_notifier_chain_unregister(&panic_notifier_list,
&wdog_panic_notifier);
@ -1357,7 +1346,7 @@ static void __exit ipmi_wdog_exit(void)
#ifdef HAVE_DIE_NMI
if (nmi_handler_registered)
unregister_die_notifier(&ipmi_nmi_handler);
unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
#endif
atomic_notifier_chain_unregister(&panic_notifier_list,


@ -43,6 +43,7 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
depends on PPC64 || HAS_IOPORT
---help---
If you have a TPM security chip from Atmel say Yes and it
will be accessible from within Linux. To compile this driver


@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
u32 count, ordinal;
unsigned long stop;
if (bufsiz > TPM_BUFSIZE)
bufsiz = TPM_BUFSIZE;
count = be32_to_cpu(*((__be32 *) (buf + 2)));
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
if (count == 0)
@ -1102,6 +1105,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
{
struct tpm_chip *chip = file->private_data;
ssize_t ret_size;
int rc;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_work_sync(&chip->work);
@ -1112,8 +1116,11 @@ ssize_t tpm_read(struct file *file, char __user *buf,
ret_size = size;
mutex_lock(&chip->buffer_mutex);
if (copy_to_user(buf, chip->data_buffer, ret_size))
rc = copy_to_user(buf, chip->data_buffer, ret_size);
memset(chip->data_buffer, 0, ret_size);
if (rc)
ret_size = -EFAULT;
mutex_unlock(&chip->buffer_mutex);
}


@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void)
if (pdev) {
tpm_nsc_remove(&pdev->dev);
platform_device_unregister(pdev);
kfree(pdev);
pdev = NULL;
}
platform_driver_unregister(&nsc_drv);


@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6 (default: true)");
unsigned int i915_enable_fbc __read_mostly = 1;
unsigned int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
"(default: false)");
"(default: -1 (use per-chip default))");
unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);


@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev)
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int enable_fbc;
DRM_DEBUG_KMS("\n");
@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev)
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
if (!i915_enable_fbc) {
DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
enable_fbc = i915_enable_fbc;
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
if (INTEL_INFO(dev)->gen <= 5)
enable_fbc = 0;
}
if (!enable_fbc) {
DRM_DEBUG_KMS("fbc disabled per module param\n");
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
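
With i915_enable_fbc defaulting to -1, the code above resolves the module parameter into a per-generation default at the point of use. A small standalone sketch of that sentinel-resolution pattern; the gen cutoff mirrors the hunk above, everything else is made up:

#include <stdio.h>

static int i915_enable_fbc = -1;   /* -1 means "use the per-chip default" */

static int resolve_fbc(int gen)
{
        int enable_fbc = i915_enable_fbc;

        if (enable_fbc < 0) {
                /* Per-chip default in this sketch: FBC off up to gen 5. */
                enable_fbc = (gen <= 5) ? 0 : 1;
        }
        return enable_fbc;
}

int main(void)
{
        printf("gen 5: fbc=%d, gen 6: fbc=%d\n", resolve_fbc(5), resolve_fbc(6));
        return 0;
}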
@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
bpc = 6; /* min is 18bpp */
break;
case 24:
bpc = min((unsigned int)8, display_bpc);
bpc = 8;
break;
case 30:
bpc = min((unsigned int)10, display_bpc);
bpc = 10;
break;
case 48:
bpc = min((unsigned int)12, display_bpc);
bpc = 12;
break;
default:
DRM_DEBUG("unsupported depth, assuming 24 bits\n");
@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
break;
}
display_bpc = min(display_bpc, bpc);
DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
bpc, display_bpc);
*pipe_bpp = bpc * 3;
*pipe_bpp = display_bpc * 3;
return display_bpc != bpc;
}


@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct intel_load_detect_pipe *old);
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);


@ -92,6 +92,11 @@ struct intel_sdvo {
*/
uint16_t attached_output;
/*
* Hotplug activation bits for this device
*/
uint8_t hotplug_active[2];
/**
* This is used to select the color range of RBG outputs in HDMI mode.
* It is only valid when using TMDS encoding and 8 bit per color mode.
@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
/* No use! */
#if 0
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
struct intel_sdvo *iout = NULL;
struct intel_sdvo *sdvo;
/* find the sdvo connector */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
iout = to_intel_sdvo(connector);
if (iout->type != INTEL_OUTPUT_SDVO)
continue;
sdvo = iout->dev_priv;
if (sdvo->sdvo_reg == SDVOB && sdvoB)
return connector;
if (sdvo->sdvo_reg == SDVOC && !sdvoB)
return connector;
}
return NULL;
}
int intel_sdvo_supports_hotplug(struct drm_connector *connector)
static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
{
u8 response[2];
u8 status;
struct intel_sdvo *intel_sdvo;
DRM_DEBUG_KMS("\n");
if (!connector)
return 0;
intel_sdvo = to_intel_sdvo(connector);
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
&response, 2) && response[0];
}
void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
u8 response[2];
u8 status;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
intel_sdvo_read_response(intel_sdvo, &response, 2);
if (on) {
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
status = intel_sdvo_read_response(intel_sdvo, &response, 2);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
} else {
response[0] = 0;
response[1] = 0;
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
}
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
intel_sdvo_read_response(intel_sdvo, &response, 2);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
}
#endif
static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_sdvo->hotplug_active[0] |= 1 << device;
/* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
}
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
/* Set up hotplug command - note paranoia about contents of reply.
* We assume that the hardware is in a sane state, and only touch
* the bits we think we understand.
*/
intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
&intel_sdvo->hotplug_active, 2);
intel_sdvo->hotplug_active[0] &= ~0x3;
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",


@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
u8 msg[20];
int msg_bytes = send_bytes + 4;
u8 ack;
unsigned retry;
if (send_bytes > 16)
return -1;
@ -125,20 +126,20 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
msg[3] = (msg_bytes << 4) | (send_bytes - 1);
memcpy(&msg[4], send, send_bytes);
while (1) {
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
break;
return send_bytes;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else
return -EIO;
}
return send_bytes;
return -EIO;
}
static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
@ -149,26 +150,29 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
int msg_bytes = 4;
u8 ack;
int ret;
unsigned retry;
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_READ << 4;
msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
while (1) {
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
if (ret == 0)
return -EPROTO;
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else if (ret == 0)
return -EPROTO;
else
return -EIO;
}
return -EIO;
}
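
Both AUX helpers above replace an unbounded while (1) with a loop of at most four attempts that retries only on a DEFER reply. A self-contained sketch of that bounded retry-on-defer pattern; the reply codes and the transfer stub are invented for illustration:

#include <stdio.h>

enum reply { REPLY_ACK, REPLY_DEFER, REPLY_NACK };

/* Made-up transfer stub: defers twice, then acks. */
static enum reply do_transfer(unsigned attempt)
{
        return (attempt < 2) ? REPLY_DEFER : REPLY_ACK;
}

int main(void)
{
        unsigned retry;

        /* Stop on ACK, retry on DEFER, give up after four attempts. */
        for (retry = 0; retry < 4; retry++) {
                enum reply r = do_transfer(retry);

                if (r == REPLY_ACK) {
                        printf("acked on attempt %u\n", retry + 1);
                        return 0;
                }
                if (r != REPLY_DEFER)
                        break;
                /* a real driver would delay here before retrying */
        }
        printf("giving up: -EIO\n");
        return 1;
}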
static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,


@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
static void evergreen_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
case 2:
case 3:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
}
switch (rdev->family) {
case CHIP_HEMLOCK:
case CHIP_CYPRESS:
case CHIP_BARTS:
tcp_chan_steer_lo = 0x54763210;
tcp_chan_steer_hi = 0x0000ba98;
break;
case CHIP_JUNIPER:
case CHIP_REDWOOD:
case CHIP_CEDAR:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_TURKS:
case CHIP_CAICOS:
default:
tcp_chan_steer_lo = 0x76543210;
tcp_chan_steer_hi = 0x0000ba98;
break;
}
WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
static void evergreen_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
evergreen_program_channel_remap(rdev);
num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
grbm_gfx_index = INSTANCE_BROADCAST_WRITES;


@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
static void cayman_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
case 2:
case 3:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
}
switch (rdev->family) {
case CHIP_CAYMAN:
default:
//tcp_chan_steer_lo = 0x54763210
tcp_chan_steer_lo = 0x76543210;
tcp_chan_steer_hi = 0x0000ba98;
break;
}
WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
u32 disable_mask_per_se,
u32 max_disable_mask_per_se,
@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
cayman_program_channel_remap(rdev);
/* primary versions */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);


@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
radeon_ring_write(rdev, cur_pages);
radeon_ring_write(rdev, cur_pages);
radeon_ring_write(rdev, num_gpu_pages);
radeon_ring_write(rdev, num_gpu_pages);
radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
}
radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));


@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
int saved_dpms = connector->dpms;
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
radeon_dp_needs_link_train(radeon_connector))
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
/* Only turn off the display if it's physically disconnected */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
else if (radeon_dp_needs_link_train(radeon_connector))
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
connector->dpms = saved_dpms;
}
}


@ -208,24 +208,26 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int xorigin = 0, yorigin = 0;
int w = radeon_crtc->cursor_width;
if (x < 0)
xorigin = -x + 1;
if (y < 0)
yorigin = -y + 1;
if (xorigin >= CURSOR_WIDTH)
xorigin = CURSOR_WIDTH - 1;
if (yorigin >= CURSOR_HEIGHT)
yorigin = CURSOR_HEIGHT - 1;
if (ASIC_IS_AVIVO(rdev)) {
/* avivo cursor are offset into the total surface */
x += crtc->x;
y += crtc->y;
}
DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
if (x < 0) {
xorigin = min(-x, CURSOR_WIDTH - 1);
x = 0;
}
if (y < 0) {
yorigin = min(-y, CURSOR_HEIGHT - 1);
y = 0;
}
if (ASIC_IS_AVIVO(rdev)) {
int i = 0;
struct drm_crtc *crtc_p;
/* avivo cursor are offset into the total surface */
x += crtc->x;
y += crtc->y;
DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
/* avivo cursor image can't end on 128 pixel boundary or
* go past the end of the frame if both crtcs are enabled
*/
@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
radeon_lock_cursor(crtc, true);
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
((xorigin ? 0 : x) << 16) |
(yorigin ? 0 : y));
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
((xorigin ? 0 : x) << 16) |
(yorigin ? 0 : y));
WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
| yorigin));
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK
| ((xorigin ? 0 : x) << 16)
| (yorigin ? 0 : y)));
| (x << 16)
| y));
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
(yorigin * 256)));
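
The cursor-move change above folds a negative position into the hot-spot origin, clamps it to the cursor size, and then writes 0 as the position instead of special-casing it in the register writes. A standalone sketch of that clamping, assuming a 64x64 cursor and made-up coordinates:

#include <stdio.h>

#define CURSOR_WIDTH  64
#define CURSOR_HEIGHT 64

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        /* Made-up cursor position partially off the left and top edges. */
        int x = -10, y = -200, xorigin = 0, yorigin = 0;

        /* Fold a negative position into the hot-spot origin, capped at the
         * cursor size, and pin the position register value at 0. */
        if (x < 0) {
                xorigin = min_int(-x, CURSOR_WIDTH - 1);
                x = 0;
        }
        if (y < 0) {
                yorigin = min_int(-y, CURSOR_HEIGHT - 1);
                y = 0;
        }
        printf("pos=(%d,%d) hotspot=(%d,%d)\n", x, y, xorigin, yorigin);
        return 0;
}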


@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
args.ucAction = ATOM_ENABLE;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
/* workaround for DVOOutputControl on some RS690 systems */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
WREG32(RADEON_BIOS_3_SCRATCH, reg);
} else
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLON;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);


@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
static void rv770_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer, mc_shared_chremap, tmp;
bool force_no_swizzle;
switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV730:
force_no_swizzle = false;
break;
case CHIP_RV710:
case CHIP_RV740:
default:
force_no_swizzle = true;
break;
}
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
case 2:
case 3:
if (force_no_swizzle)
mc_shared_chremap = 0x00fac688;
else
mc_shared_chremap = 0x00bbc298;
break;
}
if (rdev->family == CHIP_RV740)
tcp_chan_steer = 0x00ef2a60;
else
tcp_chan_steer = 0x00fac688;
/* RV770 CE has special chremap setup */
if (rdev->pdev->device == 0x944e) {
tcp_chan_steer = 0x00b08b08;
mc_shared_chremap = 0x00b08b08;
}
WREG32(TCP_CHAN_STEER, tcp_chan_steer);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
rv770_program_channel_remap(rdev);
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);


@ -36,17 +36,25 @@
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/moduleparam.h>
#include <asm/msr.h>
#include <asm/processor.h>
#define DRVNAME "coretemp"
/*
* force_tjmax only matters when TjMax can't be read from the CPU itself.
* When set, it replaces the driver's suboptimal heuristic.
*/
static int force_tjmax;
module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
#ifdef CONFIG_SMP
@ -69,8 +77,6 @@
* This value is passed as "id" field to rdmsr/wrmsr functions.
* @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
* from where the temperature values should be read.
* @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
* from where the thresholds are read.
* @attr_size: Total number of per-core attrs displayed in the sysfs.
* @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
* Otherwise, temp_data holds coretemp data.
@ -79,13 +85,11 @@
struct temp_data {
int temp;
int ttarget;
int tmin;
int tjmax;
unsigned long last_updated;
unsigned int cpu;
u32 cpu_core_id;
u32 status_reg;
u32 intrpt_reg;
int attr_size;
bool is_pkg_data;
bool valid;
@ -143,19 +147,6 @@ static ssize_t show_crit_alarm(struct device *dev,
return sprintf(buf, "%d\n", (eax >> 5) & 1);
}
static ssize_t show_max_alarm(struct device *dev,
struct device_attribute *devattr, char *buf)
{
u32 eax, edx;
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct platform_data *pdata = dev_get_drvdata(dev);
struct temp_data *tdata = pdata->core_data[attr->index];
rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
}
static ssize_t show_tjmax(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@ -174,83 +165,6 @@ static ssize_t show_ttarget(struct device *dev,
return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}
static ssize_t store_ttarget(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
struct platform_data *pdata = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct temp_data *tdata = pdata->core_data[attr->index];
u32 eax, edx;
unsigned long val;
int diff;
if (strict_strtoul(buf, 10, &val))
return -EINVAL;
/*
* THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
* of milli degree celsius. Hence don't accept val > (127 * 1000)
*/
if (val > tdata->tjmax || val > 127000)
return -EINVAL;
diff = (tdata->tjmax - val) / 1000;
mutex_lock(&tdata->update_lock);
rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
eax = (eax & ~THERM_MASK_THRESHOLD1) |
(diff << THERM_SHIFT_THRESHOLD1);
wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
tdata->ttarget = val;
mutex_unlock(&tdata->update_lock);
return count;
}
static ssize_t show_tmin(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct platform_data *pdata = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
}
static ssize_t store_tmin(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
struct platform_data *pdata = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct temp_data *tdata = pdata->core_data[attr->index];
u32 eax, edx;
unsigned long val;
int diff;
if (strict_strtoul(buf, 10, &val))
return -EINVAL;
/*
* THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
* of milli degree celsius. Hence don't accept val > (127 * 1000)
*/
if (val > tdata->tjmax || val > 127000)
return -EINVAL;
diff = (tdata->tjmax - val) / 1000;
mutex_lock(&tdata->update_lock);
rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
eax = (eax & ~THERM_MASK_THRESHOLD0) |
(diff << THERM_SHIFT_THRESHOLD0);
wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
tdata->tmin = val;
mutex_unlock(&tdata->update_lock);
return count;
}
static ssize_t show_temp(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@ -374,7 +288,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
/* The 100C is default for both mobile and non mobile CPUs */
int err;
u32 eax, edx;
u32 val;
@ -385,7 +298,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
*/
err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (err) {
dev_warn(dev, "Unable to read TjMax from CPU.\n");
if (c->x86_model > 0xe && c->x86_model != 0x1c)
dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
} else {
val = (eax >> 16) & 0xff;
/*
@ -393,11 +307,17 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
* will be used
*/
if (val) {
dev_info(dev, "TjMax is %d C.\n", val);
dev_dbg(dev, "TjMax is %d degrees C\n", val);
return val * 1000;
}
}
if (force_tjmax) {
dev_notice(dev, "TjMax forced to %d degrees C by user\n",
force_tjmax);
return force_tjmax * 1000;
}
/*
* An assumption is made for early CPUs and unreadable MSR.
* NOTE: the calculated value may not be correct.
@ -414,21 +334,6 @@ static void __devinit get_ucode_rev_on_cpu(void *edx)
rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
}
static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
{
int err;
u32 eax, edx, val;
err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (!err) {
val = (eax >> 16) & 0xff;
if (val)
return val * 1000;
}
dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
return 100000; /* Default TjMax: 100 degree celsius */
}
static int create_name_attr(struct platform_data *pdata, struct device *dev)
{
sysfs_attr_init(&pdata->name_attr.attr);
@ -442,19 +347,14 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
int attr_no)
{
int err, i;
static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev,
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
struct device_attribute *devattr, char *buf) = {
show_label, show_crit_alarm, show_temp, show_tjmax,
show_max_alarm, show_ttarget, show_tmin };
static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
struct device_attribute *devattr, const char *buf,
size_t count) = { NULL, NULL, NULL, NULL, NULL,
store_ttarget, store_tmin };
static const char *names[TOTAL_ATTRS] = {
show_ttarget };
static const char *const names[TOTAL_ATTRS] = {
"temp%d_label", "temp%d_crit_alarm",
"temp%d_input", "temp%d_crit",
"temp%d_max_alarm", "temp%d_max",
"temp%d_max_hyst" };
"temp%d_max" };
for (i = 0; i < tdata->attr_size; i++) {
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
@ -462,10 +362,6 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
if (rw_ptr[i]) {
tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
}
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
tdata->sd_attrs[i].index = attr_no;
err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
@ -481,9 +377,9 @@ exit_free:
}
static int __devinit chk_ucode_version(struct platform_device *pdev)
static int __cpuinit chk_ucode_version(unsigned int cpu)
{
struct cpuinfo_x86 *c = &cpu_data(pdev->id);
struct cpuinfo_x86 *c = &cpu_data(cpu);
int err;
u32 edx;
@ -494,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev)
*/
if (c->x86_model == 0xe && c->x86_mask < 0xc) {
/* check for microcode update */
err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu,
err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,
&edx, 1);
if (err) {
dev_err(&pdev->dev,
"Cannot determine microcode revision of "
"CPU#%u (%d)!\n", pdev->id, err);
pr_err("Cannot determine microcode revision of "
"CPU#%u (%d)!\n", cpu, err);
return -ENODEV;
} else if (edx < 0x39) {
dev_err(&pdev->dev,
"Errata AE18 not fixed, update BIOS or "
"microcode of the CPU!\n");
pr_err("Errata AE18 not fixed, update BIOS or "
"microcode of the CPU!\n");
return -ENODEV;
}
}
@ -538,8 +432,6 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
MSR_IA32_THERM_STATUS;
tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
MSR_IA32_THERM_INTERRUPT;
tdata->is_pkg_data = pkg_flag;
tdata->cpu = cpu;
tdata->cpu_core_id = TO_CORE_ID(cpu);
@ -548,11 +440,11 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
return tdata;
}
static int create_core_data(struct platform_data *pdata,
struct platform_device *pdev,
static int create_core_data(struct platform_device *pdev,
unsigned int cpu, int pkg_flag)
{
struct temp_data *tdata;
struct platform_data *pdata = platform_get_drvdata(pdev);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
int err, attr_no;
@ -588,25 +480,21 @@ static int create_core_data(struct platform_data *pdata,
goto exit_free;
/* We can access status register. Get Critical Temperature */
if (pkg_flag)
tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev);
else
tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
/*
* Test if we can access the intrpt register. If so, increase the
* 'size' enough to have ttarget/tmin/max_alarm interfaces.
* Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT
* Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET.
* The target temperature is available on older CPUs but not in this
* register. Atoms don't have the register at all.
*/
err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
if (!err) {
tdata->attr_size += MAX_THRESH_ATTRS;
tdata->tmin = tdata->tjmax -
((eax & THERM_MASK_THRESHOLD0) >>
THERM_SHIFT_THRESHOLD0) * 1000;
tdata->ttarget = tdata->tjmax -
((eax & THERM_MASK_THRESHOLD1) >>
THERM_SHIFT_THRESHOLD1) * 1000;
if (c->x86_model > 0xe && c->x86_model != 0x1c) {
err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET,
&eax, &edx);
if (!err) {
tdata->ttarget
= tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
tdata->attr_size++;
}
}
pdata->core_data[attr_no] = tdata;
@ -618,22 +506,20 @@ static int create_core_data(struct platform_data *pdata,
return 0;
exit_free:
pdata->core_data[attr_no] = NULL;
kfree(tdata);
return err;
}
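
The ttarget block in create_core_data() above derives the target temperature from bits 8:15 of MSR_IA32_TEMPERATURE_TARGET, relative to TjMax taken from bits 16:23 in get_tjmax() earlier in this diff. A userspace sketch of that decode on a made-up raw MSR value:

#include <stdio.h>

int main(void)
{
        /* Made-up raw MSR_IA32_TEMPERATURE_TARGET contents:
         * bits 16:23 = 100 (TjMax in degrees C), bits 8:15 = 5 (offset). */
        unsigned int eax = 0x00640500;

        int tjmax   = ((eax >> 16) & 0xff) * 1000;          /* millidegrees */
        int ttarget = tjmax - ((eax >> 8) & 0xff) * 1000;

        printf("tjmax=%d mC, ttarget=%d mC\n", tjmax, ttarget);
        return 0;
}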
static void coretemp_add_core(unsigned int cpu, int pkg_flag)
{
struct platform_data *pdata;
struct platform_device *pdev = coretemp_get_pdev(cpu);
int err;
if (!pdev)
return;
pdata = platform_get_drvdata(pdev);
err = create_core_data(pdata, pdev, cpu, pkg_flag);
err = create_core_data(pdev, cpu, pkg_flag);
if (err)
dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}
@ -657,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
struct platform_data *pdata;
int err;
/* Check the microcode version of the CPU */
err = chk_ucode_version(pdev);
if (err)
return err;
/* Initialize the per-package data structures */
pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
if (!pdata)
@ -671,7 +552,7 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
if (err)
goto exit_free;
pdata->phys_proc_id = TO_PHYS_ID(pdev->id);
pdata->phys_proc_id = pdev->id;
platform_set_drvdata(pdev, pdata);
pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
@ -723,7 +604,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
mutex_lock(&pdev_list_mutex);
pdev = platform_device_alloc(DRVNAME, cpu);
pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
@ -743,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
}
pdev_entry->pdev = pdev;
pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
pdev_entry->phys_proc_id = pdev->id;
list_add_tail(&pdev_entry->list, &pdev_list);
mutex_unlock(&pdev_list_mutex);
@ -804,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu)
return;
if (!pdev) {
/* Check the microcode version of the CPU */
if (chk_ucode_version(cpu))
return;
/*
* Alright, we have DTS support.
* We are bringing the _first_ core in this pkg


@ -72,7 +72,7 @@ struct ds620_data {
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
u16 temp[3]; /* Register values, word */
s16 temp[3]; /* Register values, word */
};
/*


@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83791d_remove(struct i2c_client *client);
static int w83791d_read(struct i2c_client *client, u8 register);
static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
static int w83791d_read(struct i2c_client *client, u8 reg);
static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
static struct w83791d_data *w83791d_update_device(struct device *dev);
#ifdef DEBUG


@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
if (!(rq->cmd_flags & REQ_FLUSH))
return BLKPREP_OK;
cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (rq->special) {
cmd = rq->special;
memset(cmd, 0, sizeof(*cmd));
} else {
cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
}
/* FIXME: map struct ide_taskfile on rq->cmd[] */
BUG_ON(cmd == NULL);


@ -287,7 +287,7 @@ void __free_ep(struct kref *kref)
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
dst_release(ep->dst);
l2t_release(L2DATA(ep->com.tdev), ep->l2t);
l2t_release(ep->com.tdev, ep->l2t);
}
kfree(ep);
}
@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
release_tid(ep->com.tdev, GET_TID(rpl), NULL);
cxgb3_free_atid(ep->com.tdev, ep->atid);
dst_release(ep->dst);
l2t_release(L2DATA(ep->com.tdev), ep->l2t);
l2t_release(ep->com.tdev, ep->l2t);
put_ep(&ep->com);
return CPL_RET_BUF_DONE;
}
@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
if (!child_ep) {
printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
__func__);
l2t_release(L2DATA(tdev), l2t);
l2t_release(tdev, l2t);
dst_release(dst);
goto reject;
}
@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!err)
goto out;
l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
l2t_release(h->rdev.t3cdev_p, ep->l2t);
fail4:
dst_release(ep->dst);
fail3:
@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
l2t);
dst_hold(new);
l2t_release(L2DATA(ep->com.tdev), ep->l2t);
l2t_release(ep->com.tdev, ep->l2t);
ep->l2t = l2t;
dst_release(old);
ep->dst = new;


@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev)
"'%s' Display already enabled\n",
def_display->name);
}
/* set the update mode */
if (def_display->caps &
OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
if (dssdrv->enable_te)
dssdrv->enable_te(def_display, 0);
if (dssdrv->set_update_mode)
dssdrv->set_update_mode(def_display,
OMAP_DSS_UPDATE_MANUAL);
} else {
if (dssdrv->set_update_mode)
dssdrv->set_update_mode(def_display,
OMAP_DSS_UPDATE_AUTO);
}
}
}


@ -31,6 +31,7 @@
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/v4l2-event.h>
#include "isp.h"


@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
list_for_each_entry(stream, &dev->streams, list) {
if (stream->intf == intf)
return uvc_video_resume(stream);
return uvc_video_resume(stream, reset);
}
uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "


@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
if (remote == NULL)
return -EINVAL;
source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
? (remote->vdev ? &remote->vdev->entity : NULL)
: &remote->subdev.entity;
if (source == NULL)


@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream)
* buffers, making sure userspace applications are notified of the problem
* instead of waiting forever.
*/
int uvc_video_resume(struct uvc_streaming *stream)
int uvc_video_resume(struct uvc_streaming *stream, int reset)
{
int ret;
/* If the bus has been reset on resume, set the alternate setting to 0.
* This should be the default value, but some devices crash or otherwise
* misbehave if they don't receive a SET_INTERFACE request before any
* other video control request.
*/
if (reset)
usb_set_interface(stream->dev->udev, stream->intfnum, 0);
stream->frozen = 0;
ret = uvc_commit_video(stream, &stream->ctrl);


@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
/* Video */
extern int uvc_video_init(struct uvc_streaming *stream);
extern int uvc_video_suspend(struct uvc_streaming *stream);
extern int uvc_video_resume(struct uvc_streaming *stream);
extern int uvc_video_resume(struct uvc_streaming *stream, int reset);
extern int uvc_video_enable(struct uvc_streaming *stream, int enable);
extern int uvc_probe_video(struct uvc_streaming *stream,
struct uvc_streaming_control *probe);


@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd)
media_device_unregister_entity(&vdev->entity);
#endif
/* Do not call v4l2_device_put if there is no release callback set.
* Drivers that have no v4l2_device release callback might free the
* v4l2_dev instance in the video_device release callback below, so we
* must perform this check here.
*
* TODO: In the long run all drivers that use v4l2_device should use the
* v4l2_device release callback. This check will then be unnecessary.
*/
if (v4l2_dev->release == NULL)
v4l2_dev = NULL;
/* Release video_device and perform other
cleanups as needed. */
vdev->release(vdev);


@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
mutex_init(&v4l2_dev->ioctl_lock);
v4l2_prio_init(&v4l2_dev->prio);
kref_init(&v4l2_dev->ref);
get_device(dev);
v4l2_dev->dev = dev;
if (dev == NULL) {
/* If dev == NULL, then name must be filled in by the caller */
@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
dev_set_drvdata(v4l2_dev->dev, NULL);
put_device(v4l2_dev->dev);
v4l2_dev->dev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_device_disconnect);


@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
ct->regs.ack = JZ_REG_ADC_STATUS;
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
ct->chip.irq_ack = irq_gc_ack;
ct->chip.irq_ack = irq_gc_ack_set_bit;
irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);


@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
* both have been read. So the value read will always be correct.
* Set BOOT bit to refresh factory tuning values.
*/
lis3->read(lis3, CTRL_REG2, &reg);
if (lis3->whoami == WAI_12B)
reg |= CTRL2_BDU | CTRL2_BOOT;
else
reg |= CTRL2_BOOT_8B;
lis3->write(lis3, CTRL_REG2, reg);
if (lis3->pdata) {
lis3->read(lis3, CTRL_REG2, &reg);
if (lis3->whoami == WAI_12B)
reg |= CTRL2_BDU | CTRL2_BOOT;
else
reg |= CTRL2_BOOT_8B;
lis3->write(lis3, CTRL_REG2, reg);
}
/* LIS3 power on delay is quite long */
msleep(lis3->pwron_delay / lis3lv02d_get_odr());


@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
break;
case DCB_CAP_ATTR_DCBX:
*cap = BNX2X_DCBX_CAPS;
break;
default:
rval = -EINVAL;
break;


@ -4943,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
int igu_seg_id;
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
int reg_offset;
int reg_offset, reg_offset_en5;
u64 section;
int index;
struct hc_sp_status_block_data sp_sb_data;
@ -4966,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
int sindex;
/* take care of sig[0]..sig[4] */
@ -4980,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
* and not 16 between the different groups
*/
bp->attn_group[index].sig[4] = REG_RD(bp,
reg_offset + 0x10 + 0x4*index);
reg_offset_en5 + 0x4*index);
else
bp->attn_group[index].sig[4] = 0;
}
@ -7625,8 +7627,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
u32 val;
u16 pmc;
/* The mac address is written to entries 1-4 to
preserve entry 0 which is used by the PMF */
* preserve entry 0 which is used by the PMF
*/
u8 entry = (BP_VN(bp) + 1)*8;
val = (mac_addr[0] << 8) | mac_addr[1];
@ -7636,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
(mac_addr[4] << 8) | mac_addr[5];
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
/* Enable the PME and clear the status */
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
} else


@ -1384,6 +1384,18 @@
Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
* as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
* attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
* mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
* parity; [31-10] Reserved; */
#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688
/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
* as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
* attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
* mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
* parity; [31-10] Reserved; */
#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0
/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
128 bit vector */
#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000


@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
re_arm:
queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
if (!bond->kill_timers)
queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
out:
read_unlock(&bond->lock);
}


@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work)
}
re_arm:
queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
if (!bond->kill_timers)
queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
out:
read_unlock(&bond->lock);
}


@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
read_lock(&bond->lock);
if (bond->kill_timers)
goto out;
/* rejoin all groups on bond device */
__bond_resend_igmp_join_requests(bond->dev);
@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
__bond_resend_igmp_join_requests(vlan_dev);
}
if (--bond->igmp_retrans > 0)
if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
out:
read_unlock(&bond->lock);
}
@ -2538,7 +2541,7 @@ void bond_mii_monitor(struct work_struct *work)
}
re_arm:
if (bond->params.miimon)
if (bond->params.miimon && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->mii_work,
msecs_to_jiffies(bond->params.miimon));
out:
@ -2886,7 +2889,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
}
re_arm:
if (bond->params.arp_interval)
if (bond->params.arp_interval && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
out:
read_unlock(&bond->lock);
@ -3154,7 +3157,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
bond_ab_arp_probe(bond);
re_arm:
if (bond->params.arp_interval)
if (bond->params.arp_interval && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
out:
read_unlock(&bond->lock);
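Each bonding monitor above now re-checks bond->kill_timers before re-queuing its own delayed work, so the teardown path that sets the flag cannot be raced by a monitor re-arming itself. A condensed sketch of that pattern, with illustrative structure and field names rather than the real bonding ones:

/* Sketch: a periodic monitor that refuses to re-arm once teardown has started. */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct example_bond {
	rwlock_t lock;
	int kill_timers;		/* set by the teardown path under the lock */
	int interval;			/* monitoring interval in ms, 0 = disabled */
	struct workqueue_struct *wq;
	struct delayed_work monitor_work;
};

static void example_monitor(struct work_struct *work)
{
	struct example_bond *bond =
		container_of(work, struct example_bond, monitor_work.work);

	read_lock(&bond->lock);
	if (bond->kill_timers)
		goto out;

	/* ... periodic monitoring work goes here ... */

	/* Re-arm only while monitoring is enabled and teardown has not begun. */
	if (bond->interval && !bond->kill_timers)
		queue_delayed_work(bond->wq, &bond->monitor_work,
				   msecs_to_jiffies(bond->interval));
out:
	read_unlock(&bond->lock);
}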

View file

@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
if (te && te->ctx && te->client && te->client->redirect) {
update_tcb = te->client->redirect(te->ctx, old, new, e);
if (update_tcb) {
rcu_read_lock();
l2t_hold(L2DATA(tdev), e);
rcu_read_unlock();
set_l2t_ix(tdev, tid, e);
}
}
}
l2t_release(L2DATA(tdev), e);
l2t_release(tdev, e);
}
/*
@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
goto out_free;
err = -ENOMEM;
L2DATA(dev) = t3_init_l2t(l2t_capacity);
RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
if (!L2DATA(dev))
goto out_free;
@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter)
out_free_l2t:
t3_free_l2t(L2DATA(dev));
L2DATA(dev) = NULL;
rcu_assign_pointer(dev->l2opt, NULL);
out_free:
kfree(t);
return err;
}
static void clean_l2_data(struct rcu_head *head)
{
struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
t3_free_l2t(d);
}
void cxgb3_offload_deactivate(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
struct t3c_data *t = T3C_DATA(tdev);
struct l2t_data *d;
remove_adapter(adapter);
if (list_empty(&adapter_list))
@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
free_tid_maps(&t->tid_maps);
T3C_DATA(tdev) = NULL;
t3_free_l2t(L2DATA(tdev));
L2DATA(tdev) = NULL;
rcu_read_lock();
d = L2DATA(tdev);
rcu_read_unlock();
rcu_assign_pointer(tdev->l2opt, NULL);
call_rcu(&d->rcu_head, clean_l2_data);
if (t->nofail_skb)
kfree_skb(t->nofail_skb);
kfree(t);
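The deactivate path above stops freeing the L2 table synchronously: it unpublishes tdev->l2opt and defers the free with call_rcu(), so readers that fetched the old pointer under rcu_read_lock() can finish before the memory goes away. A generic sketch of that unpublish-then-retire pattern, with assumed structure and function names:

/* Sketch: retiring an RCU-published table without racing its readers. */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_table {
	struct rcu_head rcu_head;	/* for the deferred free */
	/* ... table payload ... */
};

struct example_dev {
	struct example_table __rcu *table;
};

static void example_free_table_rcu(struct rcu_head *head)
{
	struct example_table *t = container_of(head, struct example_table, rcu_head);

	kfree(t);	/* runs only after all pre-existing RCU readers are done */
}

static void example_retire_table(struct example_dev *dev)
{
	/* Updater-side fetch; serialization against other updaters is assumed. */
	struct example_table *t = rcu_dereference_protected(dev->table, 1);

	RCU_INIT_POINTER(dev->table, NULL);	/* unpublish for new readers */
	if (t)
		call_rcu(&t->rcu_head, example_free_table_rcu);
}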

View file

@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
struct net_device *dev)
{
struct l2t_entry *e;
struct l2t_data *d = L2DATA(cdev);
struct l2t_entry *e = NULL;
struct l2t_data *d;
int hash;
u32 addr = *(u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = arp_hash(addr, ifidx, d);
struct port_info *p = netdev_priv(dev);
int smt_idx = p->port_id;
rcu_read_lock();
d = L2DATA(cdev);
if (!d)
goto done_rcu;
hash = arp_hash(addr, ifidx, d);
write_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx &&
@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
}
done:
write_unlock_bh(&d->lock);
done_rcu:
rcu_read_unlock();
return e;
}

View file

@ -76,6 +76,7 @@ struct l2t_data {
atomic_t nfree; /* number of free entries */
rwlock_t lock;
struct l2t_entry l2tab[0];
struct rcu_head rcu_head; /* to handle rcu cleanup */
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
/*
* Getting to the L2 data from an offload device.
*/
#define L2DATA(dev) ((dev)->l2opt)
#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
#define W_TCB_L2T_IX 0
#define S_TCB_L2T_IX 7
@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
return t3_l2t_send_slow(dev, skb, e);
}
static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
{
if (atomic_dec_and_test(&e->refcnt))
struct l2t_data *d;
rcu_read_lock();
d = L2DATA(t);
if (atomic_dec_and_test(&e->refcnt) && d)
t3_l2e_free(d, e);
rcu_read_unlock();
}
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
atomic_dec(&d->nfree);
}
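With L2DATA() now wrapped in rcu_dereference(), callers such as l2t_release() and l2t_hold() above must hold the RCU read lock across the lookup and must tolerate a NULL table while the offload device is being torn down. A small read-side sketch, reusing the assumed example_dev/example_table types from the previous sketch:

/* Sketch: read-side access to an RCU-protected per-device pointer. */
static int example_use_table(struct example_dev *dev)
{
	struct example_table *t;
	int ret = -ENODEV;

	rcu_read_lock();
	t = rcu_dereference(dev->table);	/* may be NULL during teardown */
	if (t) {
		/* ... use t only inside the read-side critical section ... */
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}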

Some files were not shown because too many files have changed in this diff.